Commit 706a0912 authored by Michael Howitz, committed by GitHub

Merge pull request #357 from zopefoundation/config-with-pure-python

* Config with pure python
* Lint the code.
* Add support for Python 3.9 and 3.10.
parents 1f4c6429 f79c50b6
[run]
source = src/ZODB/
parallel = true
omit =
src/ZODB/tests/*
src/ZODB/scripts/tests/*
[report]
exclude_lines =
pragma: nocover
pragma: no cover
if __name__ == ['"]__main__['"]:
assert False
self.fail
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
#
# EditorConfig Configuration file, for more details see:
# http://EditorConfig.org
# EditorConfig is a convention description that can be interpreted
# by multiple editors to enforce common coding conventions for specific
# file types.
# Top-most EditorConfig file:
# will ignore other EditorConfig files in the home directory or higher up the tree.
root = true
[*] # For All Files
# Unix-style newlines with a newline ending every file
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
# Set default charset
charset = utf-8
# Indent style default
indent_style = space
# Max line length - hard line wrapping should be disabled
max_line_length = off
[*.{py,cfg,ini}]
# 4 space indentation
indent_size = 4
[*.{yml,zpt,pt,dtml,zcml}]
# 2 space indentation
indent_size = 2
[{Makefile,.gitmodules}]
# Tab indentation (no size specified, but view as 4 spaces)
indent_style = tab
indent_size = unset
tab_width = unset
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
name: tests
on:
......@@ -5,27 +7,41 @@ on:
pull_request:
schedule:
- cron: '0 12 * * 0' # run once a week on Sunday
# Allow running this workflow manually from the Actions tab
workflow_dispatch:
jobs:
build:
strategy:
# We want to see all failures:
fail-fast: false
matrix:
os:
- ubuntu
- windows
config:
# [Python version, tox env]
- ["3.9", "lint"]
- ["2.7", "py27"]
- ["3.5", "py35"]
- ["3.6", "py36"]
- ["3.7", "py37"]
- ["3.8", "py38"]
- ["3.8", "py38-pure"]
- ["3.9", "py39"]
- ["3.10", "py310"]
- ["pypy2", "pypy"]
- ["pypy3", "pypy3"]
- ["3.7", "docs"]
- ["3.7", "coverage"]
- ["3.9", "docs"]
- ["3.9", "coverage"]
- ["3.8", "py38-pure"]
exclude:
- { os: windows, config: ["3.9", "lint"] }
- { os: windows, config: ["3.9", "docs"] }
- { os: windows, config: ["3.9", "coverage"] }
- { os: windows, config: ["pypy2", "pypy"] }
runs-on: ubuntu-latest
name: ${{ matrix.config[1] }}
runs-on: ${{ matrix.os }}-latest
name: ${{ matrix.os }}-${{ matrix.config[1] }}
steps:
- uses: actions/checkout@v2
- name: Set up Python
......
bin
eggs
develop-eggs
parts
.installed.cfg
build
doc/_build
__pycache__
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
*.dll
*.egg-info/
*.profraw
*.pyc
*.pyo
*.so
.tox
.coverage
.coverage.*
nosetests.xml
coverage.xml
*.egg-info
*.egg
dist
testing.log
.eggs/
.dir-locals.el
htmlcov
tmp
*~
.*.swp
.installed.cfg
.mr.developer.cfg
.tox/
.vscode/
__pycache__/
bin/
build/
coverage.xml
develop-eggs/
develop/
dist/
docs/_build
eggs/
etc/
lib/
lib64
log/
parts/
pyvenv.cfg
var/
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
[meta]
template = "pure-python"
commit-id = "de499940b679dcda1c60b089f30134146da31e9a"
[python]
with-windows = true
with-pypy = true
with-future-python = false
with-legacy-python = true
with-docs = true
with-sphinx-doctests = false
[tox]
use-flake8 = true
testenv-setenv = [
"ZOPE_INTERFACE_STRICT_IRO=1",
]
additional-envlist = [
"py38-pure",
]
testenv-additional = [
"",
"[testenv:py38-pure]",
"basepython = python3.8",
"setenv =",
" PURE_PYTHON = 1",
]
[coverage]
fail-under = 80
[flake8]
additional-config = [
"# F401 imported but unused",
"per-file-ignores =",
" src/ZODB/FileStorage/__init__.py: F401",
" src/ZODB/__init__.py: F401",
]
[manifest]
additional-rules = [
"include COPYING",
"recursive-include docs *.ico",
"recursive-include docs *.png",
"recursive-include docs *.svg",
"recursive-include src *.fs",
"recursive-include src *.rst",
"recursive-include src *.test",
"recursive-include src *.txt",
"recursive-include src *.xml",
]
[check-manifest]
additional-ignores = [
"docs/_build/doctest/*/*/*/*",
"docs/_build/doctest/*/*/*",
"docs/_build/doctest/*/*",
"docs/_build/html/*/*/*/*",
"docs/_build/html/*/*/*",
"docs/_build/html/*/*",
]
[github-actions]
additional-config = [
"- [\"3.8\", \"py38-pure\"]",
]
additional-exclude = [
"- { os: windows, config: [\"pypy2\", \"pypy\"] }",
]
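# A hedged illustration (not part of this commit): the py38-pure tox
# environment defined above sets PURE_PYTHON=1.  ZODB itself is pure Python;
# the variable is honoured by its C-accelerated dependencies such as
# persistent and BTrees, roughly along these lines (simplified sketch):
import os

if os.environ.get('PURE_PYTHON'):
    use_c_extensions = False              # force the pure-Python classes
else:
    try:
        import persistent.cPersistence    # noqa: F401  C extension available?
        use_c_extensions = True
    except ImportError:
        use_c_extensions = False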
......@@ -2,7 +2,7 @@
Change History
================
5.6.1 (unreleased)
5.7.0 (unreleased)
==================
- Fix ``TypeError: can't concat str to bytes`` when running the fsoids.py script with Python 3.
......@@ -24,6 +24,8 @@
- Fix deprecation warnings occurring on Python 3.10.
- Add support for Python 3.9 and 3.10.
5.6.0 (2020-06-11)
==================
......
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
include *.rst
include *.txt
include *.py
include *.ini
exclude .coveragerc
exclude .travis.yml
exclude appveyor.yml
exclude buildout.cfg
include COPYING
recursive-include doc *
recursive-include src *
include buildout.cfg
include tox.ini
global-exclude *.dll
global-exclude *.pyc
global-exclude *.pyo
global-exclude *.so
global-exclude *~
recursive-include docs *.py
recursive-include docs *.rst
recursive-include docs *.txt
recursive-include docs Makefile
recursive-include src *.py
include COPYING
recursive-include docs *.ico
recursive-include docs *.png
recursive-include docs *.svg
recursive-include src *.fs
recursive-include src *.rst
recursive-include src *.test
recursive-include src *.txt
recursive-include src *.xml
environment:
matrix:
- python: 27
- python: 27-x64
- python: 35
- python: 35-x64
- python: 36
- python: 36-x64
- python: 37
- python: 37-x64
- python: 38
- python: 38-x64
install:
- "SET PATH=C:\\Python%PYTHON%;c:\\Python%PYTHON%\\scripts;%PATH%"
- echo "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64 > "C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\amd64\vcvars64.bat"
- python -m pip install -U pip setuptools wheel
- pip install -U -e .[test]
build_script:
- python -m pip install -U wheel
- python -W ignore setup.py -q bdist_wheel
test_script:
- zope-testrunner --test-path=src -vvv --auto-color
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--setuptools-version",
help="use a specific setuptools version")
options, args = parser.parse_args()
######################################################################
# load/install setuptools
try:
if options.allow_site_packages:
import setuptools
import pkg_resources
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
sys.path[:] = [x for x in sys.path if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
......@@ -56,7 +56,7 @@ master_doc = 'index'
# General information about the project.
project = 'ZODB'
copyright = '2009-2020, Zope Foundation'
copyright = '2009-2021, Zope Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
......
#! /usr/bin/env python
"""Update version numbers and release dates for the next release.
usage: release.py version date
version should be a string like "3.2.0c1"
date should be a string like "23-Sep-2003"
The following files are updated:
- setup.py
- NEWS.txt
- doc/guide/zodb.tex
- src/ZEO/__init__.py
- src/ZEO/version.txt
- src/ZODB/__init__.py
"""
import fileinput
import os
import re
# In file filename, replace the first occurrence of regexp pat with
# string repl.
def replace(filename, pat, repl):
from sys import stderr as e # fileinput hijacks sys.stdout
foundone = False
for line in fileinput.input([filename], inplace=True, backup="~"):
if foundone:
print line,
else:
match = re.search(pat, line)
if match is not None:
foundone = True
new = re.sub(pat, repl, line)
print new,
print >> e, "In %s, replaced:" % filename
print >> e, " ", repr(line)
print >> e, " ", repr(new)
else:
print line,
if not foundone:
print >> e, "*" * 60, "Oops!"
print >> e, " Failed to find %r in %r" % (pat, filename)
# Nothing in our codebase cares about ZEO/version.txt. Jeremy said
# someone asked for it so that a shell script could read up the ZEO
# version easily.
# Before ZODB 3.4, the ZEO version was one smaller than the ZODB version;
# e.g., ZEO 2.2.7 shipped with ZODB 3.2.7. Now ZEO and ZODB share their
# version number.
def write_zeoversion(path, version):
with open(path, "w") as f:
print >> f, version
def main(args):
version, date = args
replace("setup.py",
r'^VERSION = "\S+"$',
'VERSION = "%s"' % version)
replace("src/ZODB/__init__.py",
r'__version__ = "\S+"',
'__version__ = "%s"' % version)
replace("src/ZEO/__init__.py",
r'version = "\S+"',
'version = "%s"' % version)
write_zeoversion("src/ZEO/version.txt", version)
replace("NEWS.txt",
r"^Release date: .*",
"Release date: %s" % date)
replace("doc/guide/zodb.tex",
r"release{\S+}",
"release{%s}" % version)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
[bdist_wheel]
universal = 1
[flake8]
doctests = 1
# F401 imported but unused
per-file-ignores =
src/ZODB/FileStorage/__init__.py: F401
src/ZODB/__init__.py: F401
[check-manifest]
ignore =
.editorconfig
.meta.toml
docs/_build/html/_sources/*
docs/_build/doctest/*/*/*/*
docs/_build/doctest/*/*/*
docs/_build/doctest/*/*
docs/_build/html/*/*/*/*
docs/_build/html/*/*/*
docs/_build/html/*/*
......@@ -13,7 +13,7 @@
##############################################################################
from setuptools import setup, find_packages
version = '5.6.1.dev0'
version = '5.7.0.dev0'
classifiers = """\
Intended Audience :: Developers
......@@ -26,6 +26,8 @@ Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Database
......@@ -35,10 +37,12 @@ Operating System :: Unix
Framework :: ZODB
"""
def read(path):
with open(path) as f:
return f.read()
long_description = read("README.rst") + "\n\n" + read("CHANGES.rst")
tests_require = [
......@@ -67,6 +71,13 @@ setup(
tests_require=tests_require,
extras_require={
'test': tests_require,
'docs': [
'Sphinx',
'ZODB',
'j1m.sphinxautozconfig',
'sphinx_rtd_theme',
'sphinxcontrib_zopeext',
]
},
install_requires=[
'persistent >= 4.4.0',
......
......@@ -72,7 +72,7 @@ class ActivityMonitor(object):
'loads': 0,
'stores': 0,
'connections': 0,
})
})
div = res[0]
div_end = div['end']
......
......@@ -20,7 +20,6 @@ from __future__ import print_function
import time
import logging
import sys
from struct import pack as _structpack, unpack as _structunpack
import zope.interface
......@@ -35,6 +34,7 @@ from ._compat import py2_hasattr
log = logging.getLogger("ZODB.BaseStorage")
class BaseStorage(UndoLogCompatible):
"""Base class that supports storage implementations.
......@@ -74,12 +74,12 @@ class BaseStorage(UndoLogCompatible):
perhaps other things. It is always held when load() is called, so
presumably the load() implementation should also acquire the lock.
"""
_transaction=None # Transaction that is being committed
_tstatus=' ' # Transaction status, used for copying data
_transaction = None # Transaction that is being committed
_tstatus = ' ' # Transaction status, used for copying data
_is_read_only = False
def __init__(self, name, base=None):
self.__name__= name
self.__name__ = name
log.debug("create storage %s", self.__name__)
# Allocate locks:
......@@ -93,7 +93,7 @@ class BaseStorage(UndoLogCompatible):
self._commit_lock_release = self._commit_lock.release
t = time.time()
t = self._ts = TimeStamp(*(time.gmtime(t)[:5] + (t%60,)))
t = self._ts = TimeStamp(*(time.gmtime(t)[:5] + (t % 60,)))
self._tid = t.raw()
# ._oid is the highest oid in use (0 is always in use -- it's
......@@ -122,7 +122,7 @@ class BaseStorage(UndoLogCompatible):
return self.__name__
def getSize(self):
return len(self)*300 # WAG!
return len(self)*300 # WAG!
def history(self, oid, version, length=1, filter=None):
return ()
......@@ -151,7 +151,7 @@ class BaseStorage(UndoLogCompatible):
self._oid = possible_new_max_oid
def registerDB(self, db):
pass # we don't care
pass # we don't care
def isReadOnly(self):
return self._is_read_only
......@@ -279,6 +279,7 @@ class BaseStorage(UndoLogCompatible):
"""
copy(other, self, verbose)
def copy(source, dest, verbose=0):
"""Copy transactions from a source to a destination storage
......@@ -287,7 +288,7 @@ def copy(source, dest, verbose=0):
"""
_ts = None
ok = 1
preindex = {};
preindex = {}
preget = preindex.get
# restore() is a new storage API method which has an identical
# signature to store() except that it does not return anything.
......@@ -310,7 +311,8 @@ def copy(source, dest, verbose=0):
else:
t = TimeStamp(tid)
if t <= _ts:
if ok: print(('Time stamps out of order %s, %s' % (_ts, t)))
if ok:
print(('Time stamps out of order %s, %s' % (_ts, t)))
ok = 0
_ts = t.laterThan(_ts)
tid = _ts.raw()
......@@ -351,23 +353,24 @@ def checkCurrentSerialInTransaction(self, oid, serial, transaction):
raise POSException.ReadConflictError(
oid=oid, serials=(committed_tid, serial))
BaseStorage.checkCurrentSerialInTransaction = checkCurrentSerialInTransaction
@zope.interface.implementer(ZODB.interfaces.IStorageTransactionInformation)
class TransactionRecord(TransactionMetaData):
"""Abstract base class for iterator protocol"""
def __init__(self, tid, status, user, description, extension):
self.tid = tid
self.status = status
TransactionMetaData.__init__(self, user, description, extension)
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
"""Abstract base class for iterator protocol"""
version = ''
def __init__(self, oid, tid, data, prev):
......
......@@ -29,9 +29,11 @@ from pickle import PicklingError
logger = logging.getLogger('ZODB.ConflictResolution')
class BadClassName(Exception):
pass
class BadClass(object):
def __init__(self, *args):
......@@ -40,8 +42,11 @@ class BadClass(object):
def __reduce__(self):
raise BadClassName(*self.args)
_class_cache = {}
_class_cache_get = _class_cache.get
def find_global(*args):
cls = _class_cache_get(args, 0)
if cls == 0:
......@@ -60,23 +65,24 @@ def find_global(*args):
if cls == 1:
# Not importable
if (isinstance(args, tuple) and len(args) == 2 and
isinstance(args[0], six.string_types) and
isinstance(args[1], six.string_types)
):
isinstance(args[0], six.string_types) and
isinstance(args[1], six.string_types)):
return BadClass(*args)
else:
raise BadClassName(*args)
return cls
def state(self, oid, serial, prfactory, p=''):
p = p or self.loadSerial(oid, serial)
p = self._crs_untransform_record_data(p)
file = BytesIO(p)
unpickler = PersistentUnpickler(
find_global, prfactory.persistent_load, file)
unpickler.load() # skip the class tuple
unpickler.load() # skip the class tuple
return unpickler.load()
class IPersistentReference(zope.interface.Interface):
'''public contract for references to persistent objects from an object
with conflicts.'''
......@@ -114,10 +120,10 @@ class IPersistentReference(zope.interface.Interface):
have two references to the same object that are spelled with different
data (for instance, one with a class and one without).'''
@zope.interface.implementer(IPersistentReference)
class PersistentReference(object):
weak = False
oid = database_name = klass = None
......@@ -134,7 +140,7 @@ class PersistentReference(object):
self.data = self.oid, klass.args
elif isinstance(data, (bytes, str)):
self.oid = data
else: # a list
else: # a list
reference_type = data[0]
# 'm' = multi_persistent: (database_name, oid, klass)
# 'n' = multi_oid: (database_name, oid)
......@@ -165,11 +171,11 @@ class PersistentReference(object):
def __cmp__(self, other):
if self is other or (
isinstance(other, PersistentReference) and
self.oid == other.oid and
self.database_name == other.database_name and
not self.weak and
not other.weak):
isinstance(other, PersistentReference) and
self.oid == other.oid and
self.database_name == other.database_name and
not self.weak and
not other.weak):
return 0
else:
raise ValueError(
......@@ -211,6 +217,7 @@ class PersistentReference(object):
elif isinstance(data, list) and data[0] == 'm':
return data[1][2]
class PersistentReferenceFactory(object):
data = None
......@@ -218,7 +225,8 @@ class PersistentReferenceFactory(object):
def persistent_load(self, ref):
if self.data is None:
self.data = {}
key = tuple(ref) # lists are not hashable; formats are different enough
# lists are not hashable; formats are different enough
key = tuple(ref)
# even after eliminating list/tuple distinction
r = self.data.get(key, None)
if r is None:
......@@ -227,12 +235,16 @@ class PersistentReferenceFactory(object):
return r
def persistent_id(object):
if getattr(object, '__class__', 0) is not PersistentReference:
return None
return object.data
_unresolvable = {}
def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
committedData=b''):
# class_tuple, old, committed, newstate = ('',''), 0, 0, 0
......@@ -264,13 +276,12 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
_unresolvable[klass] = 1
raise ConflictError
oldData = self.loadSerial(oid, oldSerial)
if not committedData:
committedData = self.loadSerial(oid, committedSerial)
committedData = self.loadSerial(oid, committedSerial)
newstate = unpickler.load()
old = state(self, oid, oldSerial, prfactory, oldData)
old = state(self, oid, oldSerial, prfactory, oldData)
committed = state(self, oid, committedSerial, prfactory, committedData)
resolved = resolve(old, committed, newstate)
......@@ -284,7 +295,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
logger.debug(
"Conflict resolution on %s failed with %s: %s",
klass, e.__class__.__name__, str(e))
except:
except: # noqa: E722 do not use bare 'except'
# If anything else went wrong, catch it here and avoid passing an
# arbitrary exception back to the client. The error here will mask
# the original ConflictError. A client can recover from a
......@@ -296,6 +307,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
raise ConflictError(oid=oid, serials=(committedSerial, oldSerial),
data=newpickle)
class ConflictResolvingStorage(object):
"Mix-in class that provides conflict resolution handling for storages"
......
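# Hedged sketch (not part of this commit): the usual way application code
# cooperates with the ConflictResolvingStorage mix-in above is by giving a
# persistent class a _p_resolveConflict() method, e.g. a mergeable counter:
import persistent


class Counter(persistent.Persistent):

    def __init__(self):
        self.value = 0

    def change(self, delta):
        self.value += delta

    def _p_resolveConflict(self, old_state, saved_state, new_state):
        # All three arguments are __getstate__() dictionaries; merge the two
        # concurrent changes relative to their common ancestor.
        resolved = dict(new_state)
        resolved['value'] = (saved_state['value'] + new_state['value']
                             - old_state['value'])
        return resolved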
......@@ -15,7 +15,6 @@
"""
from __future__ import print_function
import logging
import sys
import tempfile
import warnings
import os
......@@ -43,7 +42,6 @@ from ZODB import POSException
from ZODB.POSException import InvalidObjectReference, ConnectionStateError
from ZODB.POSException import ConflictError, ReadConflictError
from ZODB.POSException import Unsupported, ReadOnlyHistoryError
from ZODB.POSException import POSKeyError
from ZODB.serialize import ObjectWriter, ObjectReader
from ZODB.utils import p64, u64, z64, oid_repr, positive_id
from ZODB import utils
......@@ -56,7 +54,10 @@ from . import valuedoc
global_reset_counter = 0
noop = lambda : None
def noop():
return None
def resetCaches():
"""Causes all connection caches to be reset as connections are reopened.
......@@ -131,7 +132,7 @@ class Connection(ExportImport, object):
# Do we need to join a txn manager?
self._needs_to_join = True
self.transaction_manager = None
self.opened = None # time.time() when DB.open() opened us
self.opened = None # time.time() when DB.open() opened us
self._reset_counter = global_reset_counter
self._load_count = 0 # Number of objects unghosted
......@@ -150,17 +151,17 @@ class Connection(ExportImport, object):
# List of all objects (not oids) registered as modified by the
# persistence machinery.
# All objects of this list are either in _cache or in _added.
self._registered_objects = [] # [object]
self._registered_objects = [] # [object]
# ids and serials of objects for which readCurrent was called
# in a transaction.
self._readCurrent = {} # {oid ->serial}
self._readCurrent = {} # {oid ->serial}
# Dict of oid->obj added explicitly through add(). Used as a
# preliminary cache until commit time when objects are all moved
# to the real _cache. The objects are moved to _creating at
# commit time.
self._added = {} # {oid -> object}
self._added = {} # {oid -> object}
# During commit this is turned into a list, which receives
# objects added as a side-effect of storing a modified object.
......@@ -174,11 +175,11 @@ class Connection(ExportImport, object):
# adding. Used during abort to remove created objects from the
# _cache, and by persistent_id to check that a new object isn't
# reachable from multiple databases.
self._creating = {} # {oid -> implicitly_added_flag}
self._creating = {} # {oid -> implicitly_added_flag}
# List of oids of modified objects, which have to be invalidated
# in the cache on abort and in other connections on finish.
self._modified = [] # [oid]
self._modified = [] # [oid]
# To support importFile(), implemented in the ExportImport base
# class, we need to run _importDuringCommit() from our commit()
......@@ -259,6 +260,7 @@ class Connection(ExportImport, object):
connection._cache.incrgc()
__onCloseCallbacks = None
def onCloseCallback(self, f):
"""Register a callable, f, to be called by close()."""
if self.__onCloseCallbacks is None:
......@@ -272,7 +274,7 @@ class Connection(ExportImport, object):
raise ConnectionStateError("Cannot close a connection joined to "
"a transaction")
self._cache.incrgc() # This is a good time to do some GC
self._cache.incrgc() # This is a good time to do some GC
# Call the close callbacks.
if self.__onCloseCallbacks is not None:
......@@ -281,18 +283,18 @@ class Connection(ExportImport, object):
for f in callbacks:
try:
f()
except: # except what?
except: # noqa: E722 do not use bare 'except'
f = getattr(f, 'im_self', f)
self._log.exception("Close callback failed for %s", f)
self._debug_info = ()
if self.opened and self.transaction_manager is not None:
# transaction_manager could be None if one of the __onCloseCallbacks
# closed the DB already, .e.g, ZODB.connection() does this.
# transaction_manager could be None if one of the
# __onCloseCallbacks closed the DB already, e.g., ZODB.connection()
# does this.
self.transaction_manager.unregisterSynch(self)
am = self._db._activity_monitor
if am is not None:
am.closedConnection(self)
......@@ -322,7 +324,6 @@ class Connection(ExportImport, object):
# We may have been reused by another thread at this point so
# we can't manipulate or check the state of `self` any more.
def db(self):
"""Returns a handle to the database this connection belongs to."""
return self._db
......@@ -345,7 +346,7 @@ class Connection(ExportImport, object):
new_con = self._db.databases[database_name].open(
transaction_manager=self.transaction_manager,
before=self.before,
)
)
self.connections.update(new_con.connections)
new_con.connections = self.connections
connection = new_con
......@@ -541,12 +542,11 @@ class Connection(ExportImport, object):
serial = getattr(obj, "_p_serial", z64)
if ((serial == z64)
and
((self._savepoint_storage is None)
or (oid not in self._savepoint_storage.creating)
or self._savepoint_storage.creating[oid]
)
):
and
((self._savepoint_storage is None)
or (oid not in self._savepoint_storage.creating)
or self._savepoint_storage.creating[oid]
)):
# obj is a new object
......@@ -574,8 +574,8 @@ class Connection(ExportImport, object):
raise ValueError("Can't commit with opened blobs.")
blobfilename = obj._uncommitted()
if blobfilename is None:
assert serial is not None # See _uncommitted
self._modified.pop() # not modified
assert serial is not None # See _uncommitted
self._modified.pop() # not modified
continue
s = self._storage.storeBlob(oid, serial, p, blobfilename,
'', transaction)
......@@ -593,7 +593,7 @@ class Connection(ExportImport, object):
# serial number for a newly created object
try:
self._cache[oid] = obj
except:
except: # noqa: E722 do not use bare 'except'
# Dang, I bet it's wrapped:
# TODO: Deprecate, then remove, this.
if hasattr(obj, 'aq_base'):
......@@ -609,7 +609,7 @@ class Connection(ExportImport, object):
self._readCurrent.pop(oid, None)
if s:
# savepoint
obj._p_changed = 0 # transition from changed to up-to-date
obj._p_changed = 0 # transition from changed to up-to-date
obj._p_serial = s
def tpc_abort(self, transaction):
......@@ -664,7 +664,6 @@ class Connection(ExportImport, object):
del o._p_jar
del o._p_oid
def tpc_vote(self, transaction):
"""Verify that a data manager can commit the transaction."""
try:
......@@ -685,7 +684,7 @@ class Connection(ExportImport, object):
for oid in s:
obj = self._cache.get(oid)
if obj is not None:
del obj._p_changed # transition from changed to ghost
del obj._p_changed # transition from changed to ghost
def tpc_finish(self, transaction):
"""Indicate confirmation that the transaction is done.
......@@ -769,7 +768,7 @@ class Connection(ExportImport, object):
% (className(obj), oid_repr(oid)))
try:
raise ConnectionStateError(msg)
except:
except: # noqa: E722 do not use bare 'except'
self._log.exception(msg)
raise
......@@ -790,7 +789,7 @@ class Connection(ExportImport, object):
except ConflictError:
raise
except:
except: # noqa: E722 do not use bare 'except'
self._log.exception("Couldn't load state for %s %s",
className(obj), oid_repr(oid))
raise
......@@ -847,7 +846,7 @@ class Connection(ExportImport, object):
# find everything: some on the lru list, some not
everything = self._cache.cache_data
# remove those items that are on the lru list
for k,v in items:
for k, v in items:
del everything[k]
# return a list of [ghosts....not recently used.....recently used]
return list(everything.items()) + items
......@@ -908,7 +907,7 @@ class Connection(ExportImport, object):
transaction_manager.registerSynch(self)
self._cache.incrgc() # This is a good time to do some GC
self._cache.incrgc() # This is a good time to do some GC
if delegate:
# delegate open to secondary connections
......@@ -1043,7 +1042,7 @@ class Connection(ExportImport, object):
else:
self._storage.store(oid, serial, data, '', transaction)
self._readCurrent.pop(oid, None) # same as in _store_objects()
self._readCurrent.pop(oid, None) # same as in _store_objects()
finally:
src.close()
......@@ -1102,6 +1101,7 @@ class Connection(ExportImport, object):
else:
yield ob._p_oid
@implementer(IDataManagerSavepoint)
class Savepoint(object):
......@@ -1117,13 +1117,12 @@ class Savepoint(object):
class TmpStore(object):
"""A storage-like thing to support savepoints."""
def __init__(self, storage):
self._storage = storage
for method in (
'getName', 'new_oid', 'sortKey',
'isReadOnly'
):
):
setattr(self, method, getattr(storage, method))
self._file = tempfile.TemporaryFile(prefix='TmpStore')
......@@ -1167,14 +1166,14 @@ class TmpStore(object):
# commit logic
assert version == ''
self._file.seek(self.position)
l = len(data)
length = len(data)
if serial is None:
serial = z64
header = p64(len(oid)) + oid + serial + p64(l)
header = p64(len(oid)) + oid + serial + p64(length)
self._file.write(header)
self._file.write(data)
self.index[oid] = self.position
self.position += l + len(header)
self.position += length + len(header)
return serial
def storeBlob(self, oid, serial, data, blobfilename, version,
......@@ -1221,7 +1220,7 @@ class TmpStore(object):
self._getBlobPath(),
"%s-%s%s" % (utils.oid_repr(oid), utils.tid_repr(tid),
SAVEPOINT_SUFFIX,)
)
)
def temporaryDirectory(self):
return self._storage.temporaryDirectory()
......@@ -1271,6 +1270,7 @@ class RootConvenience(object):
names = names[:57].rsplit(' ', 1)[0] + ' ...'
return "<root: %s>" % names
large_object_message = """The %s
object you're saving is large. (%s bytes.)
......@@ -1291,6 +1291,7 @@ large-record-size option in a configuration file) to specify a larger
size.
"""
class overridable_property(object):
"""
Same as property() with only a getter, except that setting a
......@@ -1332,7 +1333,7 @@ class TransactionMetaData(object):
extension = self.extension
return dumps(extension, _protocol) if extension else b''
def note(self, text): # for tests
def note(self, text): # for tests
text = text.strip()
if not isinstance(text, bytes):
text = text.encode('utf-8')
......
......@@ -41,6 +41,7 @@ from ZODB import valuedoc
logger = logging.getLogger('ZODB.DB')
class AbstractConnectionPool(object):
"""Manage a pool of connections.
......@@ -111,7 +112,7 @@ class AbstractConnectionPool(object):
class ConnectionPool(AbstractConnectionPool):
def __init__(self, size, timeout=1<<31):
def __init__(self, size, timeout=1 << 31):
super(ConnectionPool, self).__init__(size, timeout)
# A stack of connections available to hand out. This is a subset
......@@ -127,9 +128,8 @@ class ConnectionPool(AbstractConnectionPool):
def _append(self, c):
available = self.available
cactive = c._cache.cache_non_ghost_count
if (available and
(available[-1][1]._cache.cache_non_ghost_count > cactive)
):
if (available
and (available[-1][1]._cache.cache_non_ghost_count > cactive)):
i = len(available) - 1
while (i and
(available[i-1][1]._cache.cache_non_ghost_count > cactive)
......@@ -185,7 +185,7 @@ class ConnectionPool(AbstractConnectionPool):
(len(available) > target)
or
(available and available[0][0] < threshhold)
):
):
t, c = available.pop(0)
assert not c.opened
self.all.remove(c)
......@@ -244,7 +244,7 @@ class KeyedConnectionPool(AbstractConnectionPool):
# see the comments in ConnectionPool for method descriptions.
def __init__(self, size, timeout=1<<31):
def __init__(self, size, timeout=1 << 31):
super(KeyedConnectionPool, self).__init__(size, timeout)
self.pools = {}
......@@ -303,6 +303,7 @@ def toTimeStamp(dt):
args = utc_struct[:5]+(utc_struct[5] + dt.microsecond/1000000.0,)
return TimeStamp(*args)
def getTID(at, before):
if at is not None:
if before is not None:
......@@ -319,6 +320,7 @@ def getTID(at, before):
before = TimeStamp(before).raw()
return before
@implementer(IDatabase)
class DB(object):
"""The Object Database
......@@ -348,7 +350,7 @@ class DB(object):
def __init__(self,
storage,
pool_size=7,
pool_timeout=1<<31,
pool_timeout=1 << 31,
cache_size=400,
cache_size_bytes=0,
historical_pool_size=3,
......@@ -358,7 +360,7 @@ class DB(object):
database_name='unnamed',
databases=None,
xrefs=True,
large_record_size=1<<24,
large_record_size=1 << 24,
**storage_args):
"""Create an object database.
......@@ -425,10 +427,10 @@ class DB(object):
# Setup storage
if isinstance(storage, six.string_types):
from ZODB import FileStorage
from ZODB import FileStorage # noqa: F401 import unused
storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
elif storage is None:
from ZODB import MappingStorage
from ZODB import MappingStorage # noqa: F401 import unused
storage = ZODB.MappingStorage.MappingStorage(**storage_args)
else:
assert not storage_args
......@@ -507,6 +509,7 @@ class DB(object):
"""
detail = {}
def f(con, detail=detail):
for oid, ob in con._cache.items():
module = getattr(ob.__class__, '__module__', '')
......@@ -570,17 +573,18 @@ class DB(object):
'rc': (rc(ob) - 3 - (ob._p_changed is not None)
if rc else False),
'state': ob._p_changed,
#'references': con.references(oid),
})
# 'references': con.references(oid),
})
self._connectionMap(f)
return detail
def cacheFullSweep(self): # XXX this is the same as cacheMinimize
def cacheFullSweep(self): # XXX this is the same as cacheMinimize
self._connectionMap(lambda c: c._cache.full_sweep())
def cacheLastGCTime(self):
m = [0]
def f(con, m=m):
t = con._cache.cache_last_gc_time
if t > m[0]:
......@@ -598,6 +602,7 @@ class DB(object):
"""Return the total count of non-ghost objects in all object caches
"""
m = [0]
def f(con, m=m):
m[0] += con._cache.cache_non_ghost_count
......@@ -608,6 +613,7 @@ class DB(object):
"""Return non-ghost counts sizes for all connections.
"""
m = []
def f(con, m=m):
m.append({'connection': repr(con),
'ngsize': con._cache.cache_non_ghost_count,
......@@ -731,7 +737,7 @@ class DB(object):
before = getTID(at, before)
if (before is not None and
before > self.lastTransaction() and
before > getTID(self.lastTransaction(), None)):
before > getTID(self.lastTransaction(), None)):
raise ValueError(
'cannot open an historical connection in the future.')
......@@ -773,7 +779,6 @@ class DB(object):
self.pool.availableGC()
self.historical_pool.availableGC()
result.open(transaction_manager)
return result
......@@ -808,7 +813,7 @@ class DB(object):
t-o)),
'info': d,
'before': c.before,
})
})
self._connectionMap(get_info)
return result
......@@ -836,7 +841,7 @@ class DB(object):
t -= days * 86400
try:
self.storage.pack(t, self.references)
except:
except: # noqa: E722 do not use bare 'except'
logger.exception("packing")
raise
......@@ -994,7 +999,7 @@ class DB(object):
Kept for backwards compatibility only. New oids should be
allocated in a transaction using an open Connection.
"""
return self.storage.new_oid() # pragma: no cover
return self.storage.new_oid() # pragma: no cover
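# Hedged sketch (not part of this commit): allocating the oid through an open
# Connection, as the docstring above recommends, instead of DB.new_oid().
import transaction
import ZODB
from persistent.mapping import PersistentMapping

db = ZODB.DB(None)                 # None -> in-memory MappingStorage
conn = db.open()
obj = PersistentMapping()
conn.add(obj)                      # the Connection assigns obj._p_oid here
transaction.commit()               # the object is stored under that oid
conn.close()
db.close()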
def open_then_close_db_when_connection_closes(self):
"""Create and return a connection.
......@@ -1029,9 +1034,11 @@ class ContextManager(object):
self.tm.abort()
self.conn.close()
resource_counter_lock = utils.Lock()
resource_counter = 0
class TransactionalUndo(object):
def __init__(self, db, tids):
......@@ -1064,9 +1071,10 @@ class TransactionalUndo(object):
# a new storage instance, and so we must close it to be sure
# to reclaim resources in a timely manner.
#
# Once the tpc_begin method has been called, the transaction manager will
# guarantee to call either `tpc_finish` or `tpc_abort`, so those are the only
# methods we need to be concerned about calling close() from.
# Once the tpc_begin method has been called, the transaction manager
# will guarantee to call either `tpc_finish` or `tpc_abort`, so those
# are the only methods we need to be concerned about calling close()
# from.
db_mvcc_storage = self._db._mvcc_storage
self._storage = getattr(
db_mvcc_storage,
......@@ -1117,7 +1125,10 @@ def connection(*args, **kw):
"""
return DB(*args, **kw).open_then_close_db_when_connection_closes()
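# Hedged usage sketch (not part of this commit) of the helper above: the
# returned connection closes the implicitly created DB when it is itself
# closed (via the callback registered by
# open_then_close_db_when_connection_closes).
import ZODB

conn = ZODB.connection(None)       # None -> in-memory MappingStorage
db = conn.db()                     # handle to the implicitly created DB
conn.close()                       # also closes db, per the helper's purpose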
_transaction_meta_data_text_variables = 'user_name', 'description'
def _text_transaction_info(info):
for d in info:
for name in _transaction_meta_data_text_variables:
......
......@@ -35,10 +35,11 @@ import zope.interface
from .ConflictResolution import ConflictResolvingStorage
from .utils import load_current, maxtid
@zope.interface.implementer(
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
class DemoStorage(ConflictResolvingStorage):
"""A storage that stores changes against a read-only base database
......@@ -99,7 +100,6 @@ class DemoStorage(ConflictResolvingStorage):
self.base = base
self.close_base_on_close = close_base_on_close
if changes is None:
self._temporary_changes = True
changes = ZODB.MappingStorage.MappingStorage()
......@@ -128,16 +128,15 @@ class DemoStorage(ConflictResolvingStorage):
self._copy_methods_from_changes(changes)
self._next_oid = random.randint(1, 1<<62)
self._next_oid = random.randint(1, 1 << 62)
def _blobify(self):
if (self._temporary_changes and
isinstance(self.changes, ZODB.MappingStorage.MappingStorage)
):
isinstance(self.changes, ZODB.MappingStorage.MappingStorage)):
blob_dir = tempfile.mkdtemp('.demoblobs')
_temporary_blobdirs[
weakref.ref(self, cleanup_temporary_blobdir)
] = blob_dir
] = blob_dir
self.changes = ZODB.blob.BlobStorage(blob_dir, self.changes)
self._copy_methods_from_changes(self.changes)
return True
......@@ -147,6 +146,7 @@ class DemoStorage(ConflictResolvingStorage):
self.changes.cleanup()
__opened = True
def opened(self):
return self.__opened
......@@ -162,7 +162,7 @@ class DemoStorage(ConflictResolvingStorage):
'_lock',
'getSize', 'isReadOnly',
'sortKey', 'tpc_transaction',
):
):
setattr(self, meth, getattr(changes, meth))
supportsUndo = getattr(changes, 'supportsUndo', None)
......@@ -253,7 +253,7 @@ class DemoStorage(ConflictResolvingStorage):
t = self.changes.loadBefore(oid, end_tid)
result = result[:2] + (
end_tid if end_tid != maxtid else None,
)
)
return result
......@@ -296,7 +296,7 @@ class DemoStorage(ConflictResolvingStorage):
def new_oid(self):
with self._lock:
while 1:
oid = ZODB.utils.p64(self._next_oid )
oid = ZODB.utils.p64(self._next_oid)
if oid not in self._issued_oids:
try:
load_current(self.changes, oid)
......@@ -308,7 +308,7 @@ class DemoStorage(ConflictResolvingStorage):
self._issued_oids.add(oid)
return oid
self._next_oid = random.randint(1, 1<<62)
self._next_oid = random.randint(1, 1 << 62)
def pack(self, t, referencesf, gc=None):
if gc is None:
......@@ -325,7 +325,7 @@ class DemoStorage(ConflictResolvingStorage):
self.changes.pack(t, referencesf, gc=False)
except TypeError as v:
if 'gc' in str(v):
pass # The gc arg isn't supported. Don't pack
pass # The gc arg isn't supported. Don't pack
raise
def pop(self):
......@@ -344,7 +344,7 @@ class DemoStorage(ConflictResolvingStorage):
close_base_on_close=False)
def store(self, oid, serial, data, version, transaction):
assert version=='', "versions aren't supported"
assert version == '', "versions aren't supported"
if transaction is not self._transaction:
raise ZODB.POSException.StorageTransactionError(self, transaction)
......@@ -367,7 +367,7 @@ class DemoStorage(ConflictResolvingStorage):
def storeBlob(self, oid, oldserial, data, blobfilename, version,
transaction):
assert version=='', "versions aren't supported"
assert version == '', "versions aren't supported"
if transaction is not self._transaction:
raise ZODB.POSException.StorageTransactionError(self, transaction)
......@@ -425,7 +425,7 @@ class DemoStorage(ConflictResolvingStorage):
"Unexpected resolved conflicts")
return self._resolved
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
with self._lock:
if (transaction is not self._transaction):
raise ZODB.POSException.StorageTransactionError(
......@@ -437,11 +437,14 @@ class DemoStorage(ConflictResolvingStorage):
self._commit_lock.release()
return tid
_temporary_blobdirs = {}
def cleanup_temporary_blobdir(
ref,
_temporary_blobdirs=_temporary_blobdirs, # Make sure it stays around
):
_temporary_blobdirs=_temporary_blobdirs, # Make sure it stays around
):
blob_dir = _temporary_blobdirs.pop(ref, None)
if blob_dir and os.path.exists(blob_dir):
ZODB.blob.remove_committed_dir(blob_dir)
......@@ -29,17 +29,17 @@ from ZODB._compat import PersistentPickler, Unpickler, BytesIO, _protocol
logger = logging.getLogger('ZODB.ExportImport')
class ExportImport(object):
def exportFile(self, oid, f=None, bufsize=64 * 1024):
if f is None:
f = TemporaryFile(prefix="EXP")
elif isinstance(f, six.string_types):
f = open(f,'w+b')
f = open(f, 'w+b')
f.write(b'ZEXP')
oids = [oid]
done_oids = {}
done = done_oids.__contains__
load = self._storage.load
supports_blobs = IBlobStorage.providedBy(self._storage)
while oids:
......@@ -49,7 +49,7 @@ class ExportImport(object):
done_oids[oid] = True
try:
p, serial = load(oid)
except:
except: # noqa: E722 do not use bare 'except'
logger.debug("broken reference for oid %s", repr(oid),
exc_info=True)
else:
......@@ -58,7 +58,7 @@ class ExportImport(object):
if supports_blobs:
if not isinstance(self._reader.getGhost(p), Blob):
continue # not a blob
continue # not a blob
blobfilename = self._storage.loadBlob(oid, serial)
f.write(blob_begin_marker)
......@@ -159,8 +159,7 @@ class ExportImport(object):
return_oid_list.append(oid)
if (b'blob' in data and
isinstance(self._reader.getGhost(data), Blob)
):
isinstance(self._reader.getGhost(data), Blob)):
# Blob support
# Make sure we have a (redundant, overly) blob marker.
......@@ -198,11 +197,14 @@ class ExportImport(object):
export_end_marker = b'\377'*16
blob_begin_marker = b'\000BLOBSTART'
class Ghost(object):
__slots__ = ("oid",)
def __init__(self, oid):
self.oid = oid
def persistent_id(obj):
if isinstance(obj, Ghost):
return obj.oid
......@@ -89,57 +89,68 @@ packed_version = FILESTORAGE_MAGIC
logger = logging.getLogger('ZODB.FileStorage')
def panic(message, *data):
logger.critical(message, *data)
raise CorruptedTransactionError(message % data)
class FileStorageError(StorageError):
pass
class PackError(FileStorageError):
pass
class FileStorageFormatError(FileStorageError):
"""Invalid file format
The format of the given file is not valid.
"""
class CorruptedFileStorageError(FileStorageError,
StorageSystemError):
"""Corrupted file storage."""
class CorruptedTransactionError(CorruptedFileStorageError):
pass
class FileStorageQuotaError(FileStorageError,
StorageSystemError):
"""File storage quota exceeded."""
# Intended to be raised only in fspack.py, and ignored here.
class RedundantPackWarning(FileStorageError):
pass
class TempFormatter(FileStorageFormatter):
"""Helper class used to read formatted FileStorage data."""
def __init__(self, afile):
self._file = afile
@implementer(
IStorageRestoreable,
IStorageIteration,
IStorageUndoable,
IStorageCurrentRecordIteration,
IExternalGC,
IStorage,
)
IStorageRestoreable,
IStorageIteration,
IStorageUndoable,
IStorageCurrentRecordIteration,
IExternalGC,
IStorage,
)
class FileStorage(
FileStorageFormatter,
BlobStorageMixin,
ConflictResolvingStorage,
BaseStorage,
):
):
"""Storage that saves data in a file
"""
......@@ -224,7 +235,7 @@ class FileStorage(
of packing, the ``.old`` file is removed, if it exists, and
the data file is renamed to the ``.old`` file and finally the
``.pack`` file is rewritten to the data file.
"""
""" # noqa: E501 line too long
if read_only:
self._is_read_only = True
......@@ -291,20 +302,20 @@ class FileStorage(
self._files = FilePool(self._file_name)
r = self._restore_index()
if r is not None:
self._used_index = 1 # Marker for testing
self._used_index = 1 # Marker for testing
index, start, ltid = r
self._initIndex(index, tindex)
self._pos, self._oid, tid = read_index(
self._file, file_name, index, tindex, stop,
ltid=ltid, start=start, read_only=read_only,
)
)
else:
self._used_index = 0 # Marker for testing
self._used_index = 0 # Marker for testing
self._pos, self._oid, tid = read_index(
self._file, file_name, index, tindex, stop,
read_only=read_only,
)
)
self._save_index()
self._ltid = tid
......@@ -344,9 +355,9 @@ class FileStorage(
return BaseStorage.copyTransactionsFrom(self, other)
def _initIndex(self, index, tindex):
self._index=index
self._tindex=tindex
self._index_get=index.get
self._index = index
self._tindex = tindex
self._index_get = index.get
def __len__(self):
return len(self._index)
......@@ -356,6 +367,7 @@ class FileStorage(
return fsIndex(), {}
_saved = 0
def _save_index(self):
"""Write the database index to a file to support quick startup."""
......@@ -373,7 +385,8 @@ class FileStorage(
except OSError:
pass
os.rename(tmp_name, index_name)
except: pass
except: # noqa: E722 do not use bare 'except'
pass
self._saved += 1
......@@ -401,10 +414,10 @@ class FileStorage(
def _check_sanity(self, index, pos):
if pos < 100:
return 0 # insane
return 0 # insane
self._file.seek(0, 2)
if self._file.tell() < pos:
return 0 # insane
return 0 # insane
ltid = None
max_checked = 5
......@@ -416,22 +429,22 @@ class FileStorage(
tl = u64(rstl)
pos = pos - tl - 8
if pos < 4:
return 0 # insane
return 0 # insane
h = self._read_txn_header(pos)
if not ltid:
ltid = h.tid
if h.tlen != tl:
return 0 # inconsistent lengths
return 0 # inconsistent lengths
if h.status == 'u':
continue # undone trans, search back
continue # undone trans, search back
if h.status not in ' p':
return 0 # insane
return 0 # insane
if tl < h.headerlen():
return 0 # insane
return 0 # insane
tend = pos + tl
opos = pos + h.headerlen()
if opos == tend:
continue # empty trans
continue # empty trans
while opos < tend and checked < max_checked:
# Read the data records for this transaction
......@@ -441,7 +454,7 @@ class FileStorage(
return 0
if index.get(h.oid, 0) != opos:
return 0 # insane
return 0 # insane
checked += 1
......@@ -457,13 +470,13 @@ class FileStorage(
# fsIndex here, and, if we're not in read-only mode, the .index
# file is rewritten with the converted fsIndex so we don't need to
# convert it again the next time.
file_name=self.__name__
index_name=file_name+'.index'
file_name = self.__name__
index_name = file_name+'.index'
if os.path.exists(index_name):
try:
info = fsIndex.load(index_name)
except:
except: # noqa: E722 do not use bare 'except'
logger.exception('loading index')
return None
else:
......@@ -503,13 +516,13 @@ class FileStorage(
def close(self):
self._file.close()
self._files.close()
if hasattr(self,'_lock_file'):
if hasattr(self, '_lock_file'):
self._lock_file.close()
if self._tfile:
self._tfile.close()
try:
self._save_index()
except:
except: # noqa: E722 do not use bare 'except'
# Log the error and continue
logger.exception("Error saving index on close()")
......@@ -524,7 +537,7 @@ class FileStorage(
except TypeError:
raise TypeError("invalid oid %r" % (oid,))
load = load_current # Keep load for now for old clients
load = load_current # Keep load for now for old clients
def load(self, oid, version=''):
"""Return pickle data and serial number."""
......@@ -593,7 +606,6 @@ class FileStorage(
self.set_max_oid(oid)
old = self._index_get(oid, 0)
committed_tid = None
pnv = None
if old:
h = self._read_data_header(old, oid)
committed_tid = h.tid
......@@ -798,7 +810,7 @@ class FileStorage(
cp(self._tfile, self._file, dlen)
self._file.write(p64(tl))
self._file.flush()
except:
except: # noqa: E722 do not use bare 'except'
# Hm, an error occurred writing out the data. Maybe the
# disk is full. We don't want any turd at the end.
self._file.truncate(self._pos)
......@@ -833,7 +845,7 @@ class FileStorage(
# At this point, we may have committed the data to disk.
# If we fail from here, we're in bad shape.
self._finish_finish(tid)
except:
except: # noqa: E722 do not use bare 'except'
# Ouch. This is bad. Let's try to get back to where we were
# and then roll over and die
logger.critical("Failure in _finish. Closing.", exc_info=True)
......@@ -857,7 +869,7 @@ class FileStorage(
if self._nextpos:
self._file.truncate(self._pos)
self._files.flush()
self._nextpos=0
self._nextpos = 0
self._blob_tpc_abort()
def _undoDataInfo(self, oid, pos, tpos):
......@@ -883,7 +895,7 @@ class FileStorage(
pos = h.back
if tpos:
self._tfile.seek(tpos) # Restore temp file to end
self._tfile.seek(tpos) # Restore temp file to end
return h.tid, pos, data
......@@ -909,7 +921,7 @@ class FileStorage(
pointer 0.
"""
copy = True # Can we just copy a data pointer
copy = True # Can we just copy a data pointer
# First check if it is possible to undo this record.
tpos = self._tindex.get(oid, 0)
......@@ -1036,13 +1048,13 @@ class FileStorage(
raise StorageTransactionError(self, transaction)
with self._lock:
# Find the right transaction to undo and call _txn_undo_write().
tid = decodebytes(transaction_id + b'\n')
assert len(tid) == 8
tpos = self._txn_find(tid, 1)
tindex = self._txn_undo_write(tpos)
self._tindex.update(tindex)
return self._tid, tindex.keys()
# Find the right transaction to undo and call _txn_undo_write().
tid = decodebytes(transaction_id + b'\n')
assert len(tid) == 8
tpos = self._txn_find(tid, 1)
tindex = self._txn_undo_write(tpos)
self._tindex.update(tindex)
return self._tid, tindex.keys()
def _txn_find(self, tid, stop_at_pack):
pos = self._pos
......@@ -1080,7 +1092,7 @@ class FileStorage(
while pos < tend:
h = self._read_data_header(pos)
if h.oid in failures:
del failures[h.oid] # second chance!
del failures[h.oid] # second chance!
assert base + self._tfile.tell() == here, (here, base,
self._tfile.tell())
......@@ -1096,14 +1108,14 @@ class FileStorage(
try:
up, userial = self._loadBackTxn(h.oid, prev)
except POSKeyError:
pass # It was removed, so no need to copy data
pass # It was removed, so no need to copy data
else:
if self.is_blob_record(up):
# We're undoing a blob modification operation.
# We have to copy the blob data
tmp = mktemp(dir=self.fshelper.temp_dir)
with self.openCommittedBlobFile(
h.oid, userial) as sfp:
h.oid, userial) as sfp:
with open(tmp, 'wb') as dfp:
cp(sfp, dfp)
self._blob_storeblob(h.oid, self._tid, tmp)
......@@ -1137,7 +1149,8 @@ class FileStorage(
pos = self._lookup_pos(oid)
while 1:
if len(r) >= size: return r
if len(r) >= size:
return r
h = self._read_data_header(pos)
th = self._read_txn_header(h.tloc)
......@@ -1195,7 +1208,7 @@ class FileStorage(
if self._is_read_only:
raise ReadOnlyError()
stop = TimeStamp(*time.gmtime(t)[:5]+(t%60,)).raw()
stop = TimeStamp(*time.gmtime(t)[:5]+(t % 60,)).raw()
if stop == z64:
raise FileStorageError('Invalid pack time')
......@@ -1217,8 +1230,6 @@ class FileStorage(
if self.blob_dir and os.path.exists(self.blob_dir + ".old"):
remove_committed_dir(self.blob_dir + ".old")
cleanup = []
have_commit_lock = False
try:
pack_result = None
......@@ -1304,12 +1315,12 @@ class FileStorage(
if removed:
maybe_remove_empty_dir_containing(path, level+1)
if self.pack_keep_old:
# Helpers that move oid dir or revision file to the old dir.
os.mkdir(old)
link_or_copy(os.path.join(self.blob_dir, '.layout'),
os.path.join(old, '.layout'))
def handle_file(path):
newpath = old+path[lblob_dir:]
dest = os.path.dirname(newpath)
......@@ -1412,7 +1423,7 @@ class FileStorage(
next_oid = pack(">Q", oid_as_long + 1)
try:
next_oid = index.minKey(next_oid)
except ValueError: # "empty tree" error
except ValueError: # "empty tree" error
next_oid = None
data, tid = load_current(self, oid)
......@@ -1429,6 +1440,7 @@ class FileStorage(
#
######################################################################
def shift_transactions_forward(index, tindex, file, pos, opos):
"""Copy transactions forward in the data file
......@@ -1436,17 +1448,16 @@ def shift_transactions_forward(index, tindex, file, pos, opos):
"""
# Cache a bunch of methods
seek=file.seek
read=file.read
write=file.write
seek = file.seek
read = file.read
write = file.write
index_get=index.get
index_get = index.get
# Initialize,
pv=z64
p1=opos
p2=pos
offset=p2-p1
p1 = opos
p2 = pos
offset = p2-p1
# Copy the data in two stages. In the packing stage,
# we skip records that are non-current or that are for
......@@ -1456,24 +1467,25 @@ def shift_transactions_forward(index, tindex, file, pos, opos):
# transactions, however, we have to update various back pointers.
# We have to have the storage lock in the second phase to keep
# data from being changed while we're copying.
pnv=None
while 1:
# Read the transaction record
seek(pos)
h=read(TRANS_HDR_LEN)
if len(h) < TRANS_HDR_LEN: break
tid, stl, status, ul, dl, el = unpack(TRANS_HDR,h)
h = read(TRANS_HDR_LEN)
if len(h) < TRANS_HDR_LEN:
break
tid, stl, status, ul, dl, el = unpack(TRANS_HDR, h)
status = as_text(status)
if status=='c': break # Oops. we found a checkpoint flag.
tl=u64(stl)
tpos=pos
tend=tpos+tl
if status == 'c':
break # Oops. we found a checkpoint flag.
tl = u64(stl)
tpos = pos
tend = tpos+tl
otpos=opos # start pos of output trans
otpos = opos # start pos of output trans
thl=ul+dl+el
h2=read(thl)
thl = ul+dl+el
h2 = read(thl)
if len(h2) != thl:
raise PackError(opos)
......@@ -1482,80 +1494,85 @@ def shift_transactions_forward(index, tindex, file, pos, opos):
write(h)
write(h2)
thl=TRANS_HDR_LEN+thl
pos=tpos+thl
opos=otpos+thl
thl = TRANS_HDR_LEN+thl
pos = tpos+thl
opos = otpos+thl
while pos < tend:
# Read the data records for this transaction
seek(pos)
h=read(DATA_HDR_LEN)
oid,serial,sprev,stloc,vlen,splen = unpack(DATA_HDR, h)
h = read(DATA_HDR_LEN)
oid, serial, sprev, stloc, vlen, splen = unpack(DATA_HDR, h)
assert not vlen
plen=u64(splen)
dlen=DATA_HDR_LEN+(plen or 8)
plen = u64(splen)
dlen = DATA_HDR_LEN+(plen or 8)
tindex[oid]=opos
tindex[oid] = opos
if plen: p=read(plen)
if plen:
p = read(plen)
else:
p=read(8)
p=u64(p)
if p >= p2: p=p-offset
p = read(8)
p = u64(p)
if p >= p2:
p = p-offset
elif p >= p1:
# Ick, we're in trouble. Let's bail
# to the index and hope for the best
p=index_get(oid, 0)
p=p64(p)
p = index_get(oid, 0)
p = p64(p)
# WRITE
seek(opos)
sprev=p64(index_get(oid, 0))
sprev = p64(index_get(oid, 0))
write(pack(DATA_HDR,
oid, serial, sprev, p64(otpos), 0, splen))
write(p)
opos=opos+dlen
pos=pos+dlen
opos = opos+dlen
pos = pos+dlen
# skip the (intentionally redundant) transaction length
pos=pos+8
pos = pos+8
if status != 'u':
index.update(tindex) # Record the position
index.update(tindex) # Record the position
tindex.clear()
write(stl)
opos=opos+8
opos = opos+8
return opos
def search_back(file, pos):
seek=file.seek
read=file.read
seek(0,2)
s=p=file.tell()
seek = file.seek
read = file.read
seek(0, 2)
s = p = file.tell()
while p > pos:
seek(p-8)
l=u64(read(8))
if l <= 0: break
p=p-l-8
l_ = u64(read(8))
if l_ <= 0:
break
p = p-l_-8
return p, s
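# Illustrative sketch (not part of this change): every transaction
# record is followed by a redundant 8-byte copy of its length, which is
# what lets search_back() above -- and the iterator code -- walk the
# file backwards.  ``path`` is a hypothetical Data.fs file name.
def _last_transaction_tid(path):
    """Hedged example: read the tid of the last transaction in a file."""
    with open(path, 'rb') as f:
        f.seek(0, 2)
        file_size = f.tell()
        f.seek(file_size - 8)
        tlen = u64(f.read(8))          # redundant trailing length
        f.seek(file_size - tlen - 8)   # start of the last transaction
        return f.read(8)               # its 8-byte tid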
def recover(file_name):
file=open(file_name, 'r+b')
index={}
tindex={}
file = open(file_name, 'r+b')
index = {}
tindex = {}
pos, oid, tid = read_index(file, file_name, index, tindex, recover=1)
if oid is not None:
print("Nothing to recover")
return
opos=pos
opos = pos
pos, sz = search_back(file, pos)
if pos < sz:
npos = shift_transactions_forward(index, tindex, file, pos, opos)
......@@ -1566,7 +1583,6 @@ def recover(file_name):
pos-opos, npos))
def read_index(file, name, index, tindex, stop=b'\377'*8,
ltid=z64, start=4, maxoid=z64, recover=0, read_only=0):
"""Scan the file storage and update the index.
......@@ -1642,7 +1658,7 @@ def read_index(file, name, index, tindex, stop=b'\377'*8,
logger.warning("%s time-stamp reduction at %s", name, pos)
ltid = tid
if pos+(tl+8) > file_size or status=='c':
if pos+(tl+8) > file_size or status == 'c':
# Hm, the data were truncated or the checkpoint flag wasn't
# cleared. They may also be corrupted,
# in which case, we don't want to totally lose the data.
......@@ -1671,9 +1687,9 @@ def read_index(file, name, index, tindex, stop=b'\377'*8,
name, pos)
if not read_only:
logger.warning(
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end." % name)
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end." % name)
_truncate(file, name, pos)
break
else:
......@@ -1727,7 +1743,7 @@ def read_index(file, name, index, tindex, stop=b'\377'*8,
if pos != tend:
if recover:
return tpos, None, None
panic("%s data records don't add up at %s",name,tpos)
panic("%s data records don't add up at %s", name, tpos)
# Read the (intentionally redundant) transaction length
seek(pos)
......@@ -1748,7 +1764,7 @@ def read_index(file, name, index, tindex, stop=b'\377'*8,
maxoid = index.maxKey()
except ValueError:
# The index is empty.
pass # maxoid is already equal to z64
pass # maxoid is already equal to z64
return pos, maxoid, ltid
......@@ -1759,18 +1775,18 @@ def _truncate(file, name, pos):
try:
i = 0
while 1:
oname='%s.tr%s' % (name, i)
oname = '%s.tr%s' % (name, i)
if os.path.exists(oname):
i += 1
else:
logger.warning("Writing truncated data from %s to %s",
name, oname)
o = open(oname,'wb')
o = open(oname, 'wb')
file.seek(pos)
cp(file, o, file_size-pos)
o.close()
break
except:
except: # noqa: E722 do not use bare 'except'
logger.exception("couldn\'t write truncated data for %s", name)
raise StorageSystemError("Couldn't save truncated data")
......@@ -1791,7 +1807,7 @@ class FileIterator(FileStorageFormatter):
self._file_name = filename
if file.read(4) != packed_version:
raise FileStorageFormatError(file.name)
file.seek(0,2)
file.seek(0, 2)
self._file_size = file.tell()
if (pos < 4) or pos > self._file_size:
raise ValueError("Given position is greater than the file size",
......@@ -1831,7 +1847,7 @@ class FileIterator(FileStorageFormatter):
file = self._file
pos1 = self._pos
file.seek(pos1)
tid1 = file.read(8) # XXX bytes
tid1 = file.read(8) # XXX bytes
if len(tid1) < 8:
raise CorruptedError("Couldn't read tid.")
if start < tid1:
......@@ -1852,13 +1868,13 @@ class FileIterator(FileStorageFormatter):
# case, we'll just scan from the beginning if the file is
# small enough, otherwise we'll fail.
file.seek(self._file_size-8)
l = u64(file.read(8))
if not (l + 12 <= self._file_size and
self._read_num(self._file_size-l) == l):
if self._file_size < (1<<20):
l_ = u64(file.read(8))
if not (l_ + 12 <= self._file_size and
self._read_num(self._file_size-l_) == l_):
if self._file_size < (1 << 20):
return self._scan_foreward(start)
raise ValueError("Can't find last transaction in large file")
pos2 = self._file_size-l-8
pos2 = self._file_size-l_-8
file.seek(pos2)
tid2 = file.read(8)
if tid2 < tid1:
......@@ -1881,7 +1897,6 @@ class FileIterator(FileStorageFormatter):
def _scan_forward(self, pos, start):
logger.debug("Scan forward %s:%s looking for %r",
self._file_name, pos, start)
file = self._file
while 1:
# Read the transaction record
h = self._read_txn_header(pos)
......@@ -1968,10 +1983,10 @@ class FileIterator(FileStorageFormatter):
logger.critical("%s has invalid transaction header at %s",
self._file.name, pos)
logger.warning(
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end."
% self._file.name)
"It appears that there is invalid data at the end of "
"the file, possibly due to a system crash. %s "
"truncated to recover from bad data at end."
% self._file.name)
break
else:
logger.warning("%s has invalid transaction header at %s",
......@@ -2016,6 +2031,7 @@ class TransactionRecord(_TransactionRecord):
def __iter__(self):
return TransactionRecordIterator(self)
class TransactionRecordIterator(FileStorageFormatter):
"""Iterate over the transactions in a FileStorage file."""
......@@ -2037,7 +2053,7 @@ class TransactionRecordIterator(FileStorageFormatter):
if pos + dlen > self._tend or h.tloc != self._tpos:
logger.warning("%s data record exceeds transaction"
" record at %s", file.name, pos)
" record at %s", self._file.name, pos)
break
self._pos = pos + dlen
......@@ -2122,7 +2138,7 @@ class UndoSearch(object):
if el:
try:
e = loads(self.file.read(el))
except:
except: # noqa: E722 do not use bare 'except'
pass
d = {'id': encodebytes(tid).rstrip(),
'time': TimeStamp(tid).timeTime(),
......@@ -2132,6 +2148,7 @@ class UndoSearch(object):
d.update(e)
return d
class FilePool(object):
closed = False
......@@ -2192,7 +2209,6 @@ class FilePool(object):
while self._files:
self._files.pop().close()
def flush(self):
"""Empty read buffers.
......
......@@ -90,9 +90,11 @@ from ZODB.POSException import POSKeyError
from ZODB.utils import u64, oid_repr, as_bytes
from ZODB._compat import PY3
class CorruptedError(Exception):
pass
class CorruptedDataError(CorruptedError):
def __init__(self, oid=None, buf=None, pos=None):
......@@ -110,6 +112,7 @@ class CorruptedDataError(CorruptedError):
msg += " at %d" % self.pos
return msg
# the struct formats for the headers
TRANS_HDR = ">8sQcHHH"
DATA_HDR = ">8s8sQQHQ"
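# Illustrative sketch (not part of this change): the fields behind these
# formats, as the reader methods below unpack them.  TRANS_HDR holds the
# tid, the transaction length, the status byte and the user /
# description / extension lengths; DATA_HDR holds the oid, tid, previous
# record position, enclosing transaction position, version length and
# data length.
def _explain_trans_header(raw):
    """Hedged helper naming the fields of a raw 23-byte transaction header."""
    tid, tlen, status, ulen, dlen, elen = struct.unpack(TRANS_HDR, raw)
    return {'tid': tid, 'tlen': tlen, 'status': status,
            'ulen': ulen, 'dlen': dlen, 'elen': elen}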
......@@ -121,6 +124,7 @@ assert struct.calcsize(DATA_HDR) == DATA_HDR_LEN
logger = logging.getLogger('ZODB.FileStorage.format')
class FileStorageFormatter(object):
"""Mixin class that can read and write the low-level format."""
......@@ -211,7 +215,7 @@ class FileStorageFormatter(object):
self.ltid = th.tid
if th.status == "c":
self.fail(pos, "transaction with checkpoint flag set")
if not th.status in " pu": # recognize " ", "p", and "u" as valid
if th.status not in " pu": # recognize " ", "p", and "u" as valid
self.fail(pos, "invalid transaction status: %r", th.status)
if th.tlen < th.headerlen():
self.fail(pos, "invalid transaction header: "
......@@ -232,9 +236,11 @@ class FileStorageFormatter(object):
if dh.plen:
self.fail(pos, "data record has back pointer and data")
def DataHeaderFromString(s):
return DataHeader(*struct.unpack(DATA_HDR, s))
class DataHeader(object):
"""Header for a data record."""
......@@ -250,7 +256,7 @@ class DataHeader(object):
self.prev = prev
self.tloc = tloc
self.plen = plen
self.back = 0 # default
self.back = 0 # default
def asString(self):
return struct.pack(DATA_HDR, self.oid, self.tid, self.prev,
......@@ -259,12 +265,14 @@ class DataHeader(object):
def recordlen(self):
return DATA_HDR_LEN + (self.plen or 8)
def TxnHeaderFromString(s):
res = TxnHeader(*struct.unpack(TRANS_HDR, s))
if PY3:
res.status = res.status.decode('ascii')
return res
class TxnHeader(object):
"""Header for a transaction record."""
......
......@@ -20,19 +20,20 @@ from ZODB.FileStorage.format import DATA_HDR, DATA_HDR_LEN
from ZODB.TimeStamp import TimeStamp
from ZODB.utils import u64, get_pickle_metadata
def fsdump(path, file=None, with_offset=1):
iter = FileIterator(path)
for i, trans in enumerate(iter):
size = trans._tend - trans._tpos
if with_offset:
print(("Trans #%05d tid=%016x size=%d time=%s offset=%d" %
(i, u64(trans.tid), size,
TimeStamp(trans.tid), trans._pos)), file=file)
(i, u64(trans.tid), size,
TimeStamp(trans.tid), trans._pos)), file=file)
else:
print(("Trans #%05d tid=%016x size=%d time=%s" %
(i, u64(trans.tid), size, TimeStamp(trans.tid))), file=file)
(i, u64(trans.tid), size, TimeStamp(trans.tid))), file=file)
print((" status=%r user=%r description=%r" %
(trans.status, trans.user, trans.description)), file=file)
(trans.status, trans.user, trans.description)), file=file)
for j, rec in enumerate(trans):
if rec.data is None:
......@@ -51,13 +52,15 @@ def fsdump(path, file=None, with_offset=1):
bp = ""
print((" data #%05d oid=%016x%s class=%s%s" %
(j, u64(rec.oid), size, fullclass, bp)), file=file)
(j, u64(rec.oid), size, fullclass, bp)), file=file)
iter.close()
def fmt(p64):
# Return a nicely formatted string for a packed 64-bit value
return "%016x" % u64(p64)
class Dumper(object):
"""A very verbose dumper for debuggin FileStorage problems."""
......@@ -87,13 +90,13 @@ class Dumper(object):
print("transaction id: %s" % fmt(tid), file=self.dest)
print("trec len: %d" % tlen, file=self.dest)
print("status: %r" % status, file=self.dest)
user = descr = extra = ""
user = descr = ""
if ul:
user = self.file.read(ul)
if dl:
descr = self.file.read(dl)
if el:
extra = self.file.read(el)
self.file.read(el)
print("user: %r" % user, file=self.dest)
print("description: %r" % descr, file=self.dest)
print("len(extra): %d" % el, file=self.dest)
......@@ -121,6 +124,7 @@ class Dumper(object):
sbp = self.file.read(8)
print("backpointer: %d" % u64(sbp), file=self.dest)
def main():
import sys
fsdump(sys.argv[1])
......
......@@ -18,10 +18,14 @@ from ZODB.serialize import get_refs
from ZODB.TimeStamp import TimeStamp
# Extract module.class string from pickle.
def get_class(pickle):
return "%s.%s" % get_pickle_metadata(pickle)
# Shorten a string for display.
def shorten(s, size=50):
if len(s) <= size:
return s
......@@ -35,6 +39,7 @@ def shorten(s, size=50):
sep = " ... "
return s[:nleading] + sep + s[-ntrailing:]
class Tracer(object):
"""Trace all occurrences of a set of oids in a FileStorage.
......@@ -84,7 +89,7 @@ class Tracer(object):
self.oids[oid] = 0 # 0 revisions seen so far
def _msg(self, oid, tid, *args):
self.msgs.append( (oid, tid, ' '.join(map(str, args))) )
self.msgs.append((oid, tid, ' '.join(map(str, args))))
self._produced_msg = True
def report(self):
......@@ -98,9 +103,9 @@ class Tracer(object):
NOT_SEEN = "this oid was not defined (no data record for it found)"
for oid in oids:
if oid not in oid2name:
msgs.append( (oid, None, NOT_SEEN) )
msgs.append((oid, None, NOT_SEEN))
msgs.sort() # oids are primary key, tids secondary
msgs.sort() # oids are primary key, tids secondary
current_oid = current_tid = None
for oid, tid, msg in msgs:
if oid != current_oid:
......
......@@ -36,9 +36,11 @@ import ZODB.POSException
logger = logging.getLogger(__name__)
class PackError(ZODB.POSException.POSError):
pass
class PackCopier(FileStorageFormatter):
def __init__(self, f, index, tindex):
......@@ -54,7 +56,7 @@ class PackCopier(FileStorageFormatter):
self._file.seek(pos - 8)
pos = pos - u64(self._file.read(8)) - 8
self._file.seek(pos)
h = self._file.read(TRANS_HDR_LEN) # XXX bytes
h = self._file.read(TRANS_HDR_LEN) # XXX bytes
_tid = h[:8]
if _tid == tid:
return pos
......@@ -144,6 +146,7 @@ class PackCopier(FileStorageFormatter):
finally:
self._file.seek(pos)
class GC(FileStorageFormatter):
def __init__(self, file, eof, packtime, gc, referencesf):
......@@ -330,6 +333,7 @@ class GC(FileStorageFormatter):
else:
return []
class FileStoragePacker(FileStorageFormatter):
# path is the storage file path.
......@@ -409,15 +413,15 @@ class FileStoragePacker(FileStorageFormatter):
# try our best, but don't fail
try:
self._tfile.close()
except:
except: # noqa: E722 do not use bare 'except'
pass
try:
self._file.close()
except:
except: # noqa: E722 do not use bare 'except'
pass
try:
os.remove(self._name + ".pack")
except:
except: # noqa: E722 do not use bare 'except'
pass
if self.blob_removed is not None:
self.blob_removed.close()
......@@ -459,8 +463,8 @@ class FileStoragePacker(FileStorageFormatter):
# argument, and then on every platform except native
# Windows it was observed that we could read stale
# data from the tail end of the file.
self._file.close() # else self.gc keeps the original
# alive & open
self._file.close() # else self.gc keeps the original
# alive & open
self._file = open(self._path, "rb", 0)
self._file.seek(0, 2)
self.file_end = self._file.tell()
......@@ -483,13 +487,12 @@ class FileStoragePacker(FileStorageFormatter):
if self.locked:
self._commit_lock.release()
raise # don't succeed silently
except:
except: # noqa: E722 do not use bare 'except'
if self.locked:
self._commit_lock.release()
raise
def copyToPacktime(self):
offset = 0 # the amount of space freed by packing
pos = self._metadata_size
new_pos = pos
......@@ -506,7 +509,6 @@ class FileStoragePacker(FileStorageFormatter):
self._tfile.seek(new_pos - 8)
self._tfile.write(p64(tlen))
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
......@@ -546,8 +548,8 @@ class FileStoragePacker(FileStorageFormatter):
# record. There's a bug in ZEO blob support that causes
# duplicate data records.
rpos = self.gc.reachable.get(h.oid)
is_dup = (rpos
and self._read_data_header(rpos).tid == h.tid)
is_dup = (
rpos and self._read_data_header(rpos).tid == h.tid)
if not is_dup:
if h.oid not in self.gc.reachable:
self.blob_removed.write(
......@@ -569,7 +571,6 @@ class FileStoragePacker(FileStorageFormatter):
s = th.asString()
new_tpos = self._tfile.tell()
self._tfile.write(s)
new_pos = new_tpos + len(s)
copy = 1
if h.plen:
......@@ -578,7 +579,6 @@ class FileStoragePacker(FileStorageFormatter):
data = self.fetchDataViaBackpointer(h.oid, h.back)
self.writePackedDataRecord(h, data, new_tpos)
new_pos = self._tfile.tell()
return new_tpos, pos
......
......@@ -13,6 +13,7 @@
##############################################################################
import zope.interface
class IFileStoragePacker(zope.interface.Interface):
def __call__(storage, referencesf, stop, gc):
......@@ -58,20 +59,21 @@ class IFileStoragePacker(zope.interface.Interface):
corresponding to the file records.
"""
class IFileStorage(zope.interface.Interface):
packer = zope.interface.Attribute(
"The IFileStoragePacker to be used for packing."
)
)
_file = zope.interface.Attribute(
"The file object used to access the underlying data."
)
)
_lock = zope.interface.Attribute(
"The storage lock."
)
)
_commit_lock = zope.interface.Attribute(
"The storage commit lock."
)
)
......@@ -29,10 +29,11 @@ checker = renormalizing.RENormalizing([
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.POSKeyError"), r"POSKeyError"),
(re.compile("ZODB.FileStorage.FileStorage.FileStorageQuotaError"),
"FileStorageQuotaError"),
"FileStorageQuotaError"),
(re.compile('data.fs:[0-9]+'), 'data.fs:<OFFSET>'),
])
def pack_keep_old():
"""Should a copy of the database be kept?
......@@ -106,6 +107,7 @@ directory for blobs is kept.)
>>> db.close()
"""
def pack_with_repeated_blob_records():
"""
There is a bug in ZEO that causes duplicate blob database records
......@@ -144,6 +146,7 @@ def pack_with_repeated_blob_records():
>>> db.close()
"""
def _save_index():
"""
......@@ -187,6 +190,7 @@ cleanup
"""
def pack_disk_full_copyToPacktime():
"""Recover from a disk full situation by removing the `.pack` file
......@@ -239,6 +243,7 @@ check the data we added
>>> db.close()
"""
def pack_disk_full_copyRest():
"""Recover from a disk full situation by removing the `.pack` file
......@@ -307,6 +312,7 @@ check the data we added
>>> db.close()
"""
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite(
......@@ -319,4 +325,4 @@ def test_suite():
setUp=ZODB.tests.util.setUp,
tearDown=ZODB.tests.util.tearDown,
checker=checker),
))
))
......@@ -28,9 +28,9 @@ import zope.interface
@zope.interface.implementer(
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
class MappingStorage(object):
"""In-memory storage implementation
......@@ -50,7 +50,8 @@ class MappingStorage(object):
"""
self.__name__ = name
self._data = {} # {oid->{tid->pickle}}
self._transactions = BTrees.OOBTree.OOBTree() # {tid->TransactionRecord}
# {tid->TransactionRecord}
self._transactions = BTrees.OOBTree.OOBTree()
self._ltid = ZODB.utils.z64
self._last_pack = None
self._lock = ZODB.utils.RLock()
......@@ -117,14 +118,14 @@ class MappingStorage(object):
tids.reverse()
return [
dict(
time = ZODB.TimeStamp.TimeStamp(tid).timeTime(),
tid = tid,
serial = tid,
user_name = self._transactions[tid].user,
description = self._transactions[tid].description,
extension = self._transactions[tid].extension,
size = len(tid_data[tid])
)
time=ZODB.TimeStamp.TimeStamp(tid).timeTime(),
tid=tid,
serial=tid,
user_name=self._transactions[tid].user,
description=self._transactions[tid].description,
extension=self._transactions[tid].extension,
size=len(tid_data[tid])
)
for tid in tids]
# ZODB.interfaces.IStorage
......@@ -167,8 +168,8 @@ class MappingStorage(object):
else:
raise ZODB.POSException.POSKeyError(oid)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def loadSerial(self, oid, serial):
tid_data = self._data.get(oid)
......@@ -192,7 +193,7 @@ class MappingStorage(object):
if not self._data:
return
stop = ZODB.TimeStamp.TimeStamp(*time.gmtime(t)[:5]+(t%60,)).raw()
stop = ZODB.TimeStamp.TimeStamp(*time.gmtime(t)[:5]+(t % 60,)).raw()
if self._last_pack is not None and self._last_pack >= stop:
if self._last_pack == stop:
return
......@@ -298,7 +299,7 @@ class MappingStorage(object):
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
if (transaction is not self._transaction):
raise ZODB.POSException.StorageTransactionError(
"tpc_finish called with wrong transaction")
......@@ -332,6 +333,7 @@ class MappingStorage(object):
raise ZODB.POSException.StorageTransactionError(
"tpc_vote called with wrong transaction")
class TransactionRecord(object):
status = ' '
......@@ -357,11 +359,11 @@ class TransactionRecord(object):
del self.data[oid]
return not self.data
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
"""Abstract base class for iterator protocol"""
version = ''
data_txn = None
......@@ -370,5 +372,6 @@ class DataRecord(object):
self.tid = tid
self.data = data
def DB(*args, **kw):
return ZODB.DB(MappingStorage(), *args, **kw)
......@@ -18,20 +18,26 @@ $Id$"""
from ZODB.utils import oid_repr, readable_tid_repr
# BBB: We moved the two transactions to the transaction package
from transaction.interfaces import TransactionError, TransactionFailedError
from transaction.interfaces import TransactionError # noqa: F401 import unused
from transaction.interfaces import TransactionFailedError # noqa: F401
import transaction.interfaces
def _fmt_undo(oid, reason):
s = reason and (": %s" % reason) or ""
return "Undo error %s%s" % (oid_repr(oid), s)
def _recon(class_, state):
err = class_.__new__(class_)
err.__setstate__(state)
return err
_recon.__no_side_effects__ = True
class POSError(Exception):
"""Persistent object system error."""
......@@ -49,9 +55,10 @@ class POSError(Exception):
# the args would then get lost, leading to unprintable exceptions
# and worse. Manually assign to args from the state to be sure
# this doesn't happen.
super(POSError,self).__setstate__(state)
super(POSError, self).__setstate__(state)
self.args = state['args']
class POSKeyError(POSError, KeyError):
"""Key not found in database."""
......@@ -143,6 +150,7 @@ class ConflictError(POSError, transaction.interfaces.TransientError):
def get_serials(self):
return self.serials
class ReadConflictError(ConflictError):
"""Conflict detected when object was requested to stay unchanged.
......@@ -156,64 +164,67 @@ class ReadConflictError(ConflictError):
- object is found to be removed, and
- there is a possibility that database pack was running simultaneously.
"""
def __init__(self, message=None, object=None, serials=None, **kw):
if message is None:
message = "database read conflict error"
ConflictError.__init__(self, message=message, object=object,
serials=serials, **kw)
class BTreesConflictError(ConflictError):
"""A special subclass for BTrees conflict errors."""
msgs = [# 0; i2 or i3 bucket split; positions are all -1
'Conflicting bucket split',
msgs = [
# 0; i2 or i3 bucket split; positions are all -1
'Conflicting bucket split',
# 1; keys the same, but i2 and i3 values differ, and both values
# differ from i1's value
'Conflicting changes',
# 1; keys the same, but i2 and i3 values differ, and both values
# differ from i1's value
'Conflicting changes',
# 2; i1's value changed in i2, but key+value deleted in i3
'Conflicting delete and change',
# 2; i1's value changed in i2, but key+value deleted in i3
'Conflicting delete and change',
# 3; i1's value changed in i3, but key+value deleted in i2
'Conflicting delete and change',
# 3; i1's value changed in i3, but key+value deleted in i2
'Conflicting delete and change',
# 4; i1 and i2 both added the same key, or both deleted the
# same key
'Conflicting inserts or deletes',
# 4; i1 and i2 both added the same key, or both deleted the
# same key
'Conflicting inserts or deletes',
# 5; i2 and i3 both deleted the same key
'Conflicting deletes',
# 5; i2 and i3 both deleted the same key
'Conflicting deletes',
# 6; i2 and i3 both added the same key
'Conflicting inserts',
# 6; i2 and i3 both added the same key
'Conflicting inserts',
# 7; i2 and i3 both deleted the same key, or i2 changed the value
# associated with a key and i3 deleted that key
'Conflicting deletes, or delete and change',
# 7; i2 and i3 both deleted the same key, or i2 changed the value
# associated with a key and i3 deleted that key
'Conflicting deletes, or delete and change',
# 8; i2 and i3 both deleted the same key, or i3 changed the value
# associated with a key and i2 deleted that key
'Conflicting deletes, or delete and change',
# 8; i2 and i3 both deleted the same key, or i3 changed the value
# associated with a key and i2 deleted that key
'Conflicting deletes, or delete and change',
# 9; i2 and i3 both deleted the same key
'Conflicting deletes',
# 9; i2 and i3 both deleted the same key
'Conflicting deletes',
# 10; i2 and i3 deleted all the keys, and didn't insert any,
# leaving an empty bucket; conflict resolution doesn't have
# enough info to unlink an empty bucket from its containing
# BTree correctly
'Empty bucket from deleting all keys',
# 10; i2 and i3 deleted all the keys, and didn't insert any,
# leaving an empty bucket; conflict resolution doesn't have
# enough info to unlink an empty bucket from its containing
# BTree correctly
'Empty bucket from deleting all keys',
# 11; conflicting changes in an internal BTree node
'Conflicting changes in an internal BTree node',
# 11; conflicting changes in an internal BTree node
'Conflicting changes in an internal BTree node',
# 12; i2 or i3 was empty
'Empty bucket in a transaction',
# 12; i2 or i3 was empty
'Empty bucket in a transaction',
# 13; delete of first key, which causes change to parent node
'Delete of first key',
]
# 13; delete of first key, which causes change to parent node
'Delete of first key',
]
def __init__(self, p1, p2, p3, reason):
self.p1 = p1
......@@ -226,11 +237,14 @@ class BTreesConflictError(ConflictError):
self.p2,
self.p3,
self.reason)
def __str__(self):
return "BTrees conflict error at %d/%d/%d: %s" % (
self.p1, self.p2, self.p3, self.msgs[self.reason])
class DanglingReferenceError(POSError, transaction.interfaces.TransactionError):
class DanglingReferenceError(
POSError, transaction.interfaces.TransactionError):
"""An object has a persistent reference to a missing object.
If an object is stored and it has a reference to another object
......@@ -258,9 +272,11 @@ class DanglingReferenceError(POSError, transaction.interfaces.TransactionError):
class VersionError(POSError):
"""An error in handling versions occurred."""
class VersionCommitError(VersionError):
"""An invalid combination of versions was used in a version commit."""
class VersionLockError(VersionError, transaction.interfaces.TransactionError):
"""Modification to an object modified in an unsaved version.
......@@ -269,6 +285,7 @@ class VersionLockError(VersionError, transaction.interfaces.TransactionError):
"""
############################################################################
class UndoError(POSError):
"""An attempt was made to undo a non-undoable transaction."""
......@@ -279,6 +296,7 @@ class UndoError(POSError):
def __str__(self):
return _fmt_undo(self._oid, self._reason)
class MultipleUndoErrors(UndoError):
"""Several undo errors occurred during a single transaction."""
......@@ -290,33 +308,43 @@ class MultipleUndoErrors(UndoError):
def __str__(self):
return "\n".join([_fmt_undo(*pair) for pair in self._errs])
class StorageError(POSError):
"""Base class for storage based exceptions."""
class StorageTransactionError(StorageError):
"""An operation was invoked for an invalid transaction or state."""
class StorageSystemError(StorageError):
"""Panic! Internal storage error!"""
class MountedStorageError(StorageError):
"""Unable to access mounted storage."""
class ReadOnlyError(StorageError):
"""Unable to modify objects in a read-only storage."""
class TransactionTooLargeError(StorageTransactionError):
"""The transaction exhausted some finite storage resource."""
class ExportError(POSError):
"""An export file doesn't have the right format."""
class Unsupported(POSError):
"""A feature was used that is not supported by the storage."""
class ReadOnlyHistoryError(POSError):
"""Unable to add or modify objects in an historical connection."""
class InvalidObjectReference(POSError):
"""An object contains an invalid reference to another object.
......@@ -329,6 +357,7 @@ class InvalidObjectReference(POSError):
TODO: The exception ought to have a member that is the invalid object.
"""
class ConnectionStateError(POSError):
"""A Connection isn't in the required state for an operation.
......
......@@ -12,6 +12,7 @@
#
##############################################################################
from ZODB.DB import DB, connection
import sys
from persistent import TimeStamp
......@@ -24,5 +25,3 @@ sys.modules['ZODB.PersistentMapping'] = sys.modules['persistent.mapping']
sys.modules['ZODB.PersistentList'] = sys.modules['persistent.list']
del mapping, list, sys
from ZODB.DB import DB, connection
......@@ -11,13 +11,13 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from zodbpickle import binary # noqa: F401 import unused
import sys
from six import PY3
IS_JYTHON = sys.platform.startswith('java')
_protocol = 3
from zodbpickle import binary
if not PY3:
# Python 2.x
......@@ -42,7 +42,8 @@ else:
# http://bugs.python.org/issue6784
import zodbpickle.pickle
HIGHEST_PROTOCOL = 3
from _compat_pickle import IMPORT_MAPPING, NAME_MAPPING
from _compat_pickle import IMPORT_MAPPING # noqa: F401 import unused
from _compat_pickle import NAME_MAPPING # noqa: F401 import unused
class Pickler(zodbpickle.pickle.Pickler):
def __init__(self, f, protocol=None):
......@@ -92,6 +93,7 @@ def PersistentPickler(persistent_id, *args, **kwargs):
p.persistent_id = persistent_id
return p
def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
"""
Returns a :class:`Unpickler` that will use the given `find_global` function
......@@ -104,7 +106,8 @@ def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
if find_global is not None:
unpickler.find_global = find_global
try:
unpickler.find_class = find_global # PyPy, zodbpickle, the non-c-accelerated version
# PyPy, zodbpickle, the non-c-accelerated version
unpickler.find_class = find_global
except AttributeError:
pass
if load_persistent is not None:
......@@ -118,7 +121,7 @@ try:
from cStringIO import StringIO as BytesIO
except ImportError:
# Python 3.x
from io import BytesIO
from io import BytesIO # noqa: F401 import unused
try:
......@@ -126,14 +129,15 @@ try:
from base64 import decodebytes, encodebytes
except ImportError:
# Python 2.x
from base64 import decodestring as decodebytes, encodestring as encodebytes
from base64 import decodestring as decodebytes # noqa: F401 import unused
from base64 import encodestring as encodebytes # noqa: F401 import unused
# Python 3.x: ``hasattr()`` swallows only AttributeError.
def py2_hasattr(obj, name):
try:
getattr(obj, name)
except:
except: # noqa: E722 do not use bare 'except'
return False
return True
......@@ -151,9 +155,10 @@ else:
try:
TEXT = unicode
except NameError: #pragma NO COVER Py3k
except NameError: # pragma NO COVER Py3k
TEXT = str
def ascii_bytes(x):
if isinstance(x, TEXT):
x = x.encode('ascii')
......
......@@ -35,7 +35,6 @@ from ZODB._compat import BytesIO
from ZODB._compat import PersistentUnpickler
from ZODB._compat import decodebytes
from ZODB._compat import ascii_bytes
from ZODB._compat import INT_TYPES
from ZODB._compat import PY3
......@@ -62,20 +61,21 @@ valid_modes = 'r', 'w', 'r+', 'a', 'c'
# of a weakref when the weakref object dies at the same time
# as the object it refers to. In other words, this doesn't work:
# self._ref = weakref.ref(self, lambda ref: ...)
# because the function never gets called (https://bitbucket.org/pypy/pypy/issue/2030).
# because the function never gets called
# (https://bitbucket.org/pypy/pypy/issue/2030).
# The Blob class used to use that pattern to clean up uncommitted
# files; now we use this module-level global (but still keep a
# reference in the Blob in case we need premature cleanup).
_blob_close_refs = []
@zope.interface.implementer(ZODB.interfaces.IBlob)
class Blob(persistent.Persistent):
"""A BLOB supports efficient handling of large data within ZODB."""
_p_blob_uncommitted = None # Filename of the uncommitted (dirty) data
_p_blob_committed = None # Filename of the committed data
_p_blob_ref = None # weakreference to self; also in _blob_close_refs
_p_blob_committed = None # Filename of the committed data
_p_blob_ref = None # weakreference to self; also in _blob_close_refs
readers = writers = None
......@@ -140,11 +140,10 @@ class Blob(persistent.Persistent):
if mode == 'c':
if (self._p_blob_uncommitted
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)
):
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
raise BlobError('Uncommitted changes')
return self._p_jar._storage.openCommittedBlobFile(
self._p_oid, self._p_serial)
......@@ -186,7 +185,7 @@ class Blob(persistent.Persistent):
if self._p_blob_uncommitted is None:
self._create_uncommitted_file()
result = BlobFile(self._p_blob_uncommitted, mode, self)
else: # 'r+' and 'a'
else: # 'r+' and 'a'
if self._p_blob_uncommitted is None:
# Create a new working copy
self._create_uncommitted_file()
......@@ -214,11 +213,10 @@ class Blob(persistent.Persistent):
def committed(self):
if (self._p_blob_uncommitted
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)
):
or
not self._p_blob_committed
or
self._p_blob_committed.endswith(SAVEPOINT_SUFFIX)):
raise BlobError('Uncommitted changes')
result = self._p_blob_committed
......@@ -254,7 +252,7 @@ class Blob(persistent.Persistent):
try:
rename_or_copy_blob(filename, target, chmod=False)
except:
except: # noqa: E722 do not use bare 'except'
# Recover from the failed consumption: first remove the file (it
# may exist), then reset the pointer to the uncommitted file.
self._p_blob_uncommitted = None
......@@ -317,6 +315,7 @@ class Blob(persistent.Persistent):
self._p_blob_uncommitted = self._p_blob_ref = None
return filename
class BlobFile(file):
"""A BlobFile that holds a file handle to actual blob data.
......@@ -348,8 +347,10 @@ class BlobFile(file):
# prohibit it on all versions.
raise TypeError("Pickling a BlobFile is not allowed")
_pid = str(os.getpid())
def log(msg, level=logging.INFO, subsys=_pid, exc_info=False):
message = "(%s) %s" % (subsys, msg)
logger.log(level, message, exc_info=exc_info)
......@@ -394,8 +395,8 @@ class FilesystemHelper(object):
layout = layout_marker.read().strip()
if layout != self.layout_name:
raise ValueError(
"Directory layout `%s` selected for blob directory %s, but "
"marker found for layout `%s`" %
"Directory layout `%s` selected for blob directory %s, but"
" marker found for layout `%s`" %
(self.layout_name, self.base_dir, layout))
def isSecure(self, path):
......@@ -541,6 +542,7 @@ class NoBlobsFileSystemHelper(object):
class BlobStorageError(Exception):
"""The blob storage encountered an invalid state."""
def auto_layout_select(path):
# A heuristic to look at a path and determine which directory layout to
# use.
......@@ -593,7 +595,7 @@ class BushyLayout(object):
directories = [b'0x' + hex_bytes[x:x+2]
for x in range(0, 16, 2)]
if bytes is not str: # py3
if bytes is not str: # py3
sep_bytes = os.path.sep.encode('ascii')
path_bytes = sep_bytes.join(directories)
return path_bytes.decode('ascii')
......@@ -618,8 +620,10 @@ class BushyLayout(object):
filename = "%s%s" % (utils.tid_repr(tid), BLOB_SUFFIX)
return os.path.join(oid_path, filename)
LAYOUTS['bushy'] = BushyLayout()
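# Illustrative sketch (not part of this change): the per-oid directory
# the bushy layout above produces -- eight "0x.." components, one per
# byte of the 8-byte oid (assuming ``hex_bytes`` in the elided lines is
# the hexlified oid).
def _bushy_path_demo(oid=b'\x00' * 7 + b'\x01'):
    """Hedged restatement of BushyLayout's oid-to-path computation."""
    hex_bytes = binascii.hexlify(oid)                   # b'0000000000000001'
    parts = [b'0x' + hex_bytes[x:x + 2] for x in range(0, 16, 2)]
    path = os.path.sep.encode('ascii').join(parts)
    return path.decode('ascii')   # '0x00/0x00/.../0x00/0x01' on POSIX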
class LawnLayout(BushyLayout):
"""A shallow directory layout for blob directories.
......@@ -640,8 +644,10 @@ class LawnLayout(BushyLayout):
except (TypeError, binascii.Error):
raise ValueError('Not a valid OID path: `%s`' % path)
LAYOUTS['lawn'] = LawnLayout()
class BlobStorageMixin(object):
"""A mix-in to help storages support blobs."""
......@@ -738,7 +744,6 @@ class BlobStorage(BlobStorageMixin):
"""A wrapper/proxy storage to support blobs.
"""
def __init__(self, base_directory, storage, layout='automatic'):
assert not ZODB.interfaces.IBlobStorage.providedBy(storage)
self.__storage = storage
......@@ -780,8 +785,8 @@ class BlobStorage(BlobStorageMixin):
def tpc_abort(self, *arg, **kw):
# We need to override the base storage's abort instead of
# providing an _abort method because methods found on the proxied object
# aren't rebound to the proxy
# providing an _abort method because methods found on the proxied
# object aren't rebound to the proxy
self.__storage.tpc_abort(*arg, **kw)
self._blob_tpc_abort()
......@@ -814,7 +819,7 @@ class BlobStorage(BlobStorageMixin):
if exists:
files = os.listdir(oid_path)
files.sort()
latest = files[-1] # depends on ever-increasing tids
latest = files[-1] # depends on ever-increasing tids
files.remove(latest)
for f in files:
remove_committed(os.path.join(oid_path, f))
......@@ -905,7 +910,10 @@ class BlobStorage(BlobStorageMixin):
res = BlobStorage(base_dir, s)
return res
copied = logging.getLogger('ZODB.blob.copied').debug
def rename_or_copy_blob(f1, f2, chmod=True):
"""Try to rename f1 to f2, fallback to copy.
......@@ -926,6 +934,7 @@ def rename_or_copy_blob(f1, f2, chmod=True):
if chmod:
set_not_writable(f2)
if sys.platform == 'win32':
# On Windows, you can't remove read-only files, so make the
# file writable first.
......@@ -952,6 +961,7 @@ def find_global_Blob(module, class_):
if module == 'ZODB.blob' and class_ == 'Blob':
return Blob
def is_blob_record(record):
"""Check whether a database record is a blob record.
......@@ -960,7 +970,8 @@ def is_blob_record(record):
"""
if record and (b'ZODB.blob' in record):
unpickler = PersistentUnpickler(find_global_Blob, None, BytesIO(record))
unpickler = PersistentUnpickler(
find_global_Blob, None, BytesIO(record))
try:
return unpickler.load() is Blob
......@@ -971,6 +982,7 @@ def is_blob_record(record):
return False
def copyTransactionsFromTo(source, destination):
for trans in source.iterator():
destination.tpc_begin(trans, trans.tid, trans.status)
......@@ -990,10 +1002,10 @@ def copyTransactionsFromTo(source, destination):
with open(name, 'wb') as df:
utils.cp(sf, df)
destination.restoreBlob(record.oid, record.tid, record.data,
name, record.data_txn, trans)
name, record.data_txn, trans)
else:
destination.restore(record.oid, record.tid, record.data,
'', record.data_txn, trans)
'', record.data_txn, trans)
destination.tpc_vote(trans)
destination.tpc_finish(trans)
......@@ -1001,6 +1013,8 @@ def copyTransactionsFromTo(source, destination):
NO_WRITE = ~ (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
READ_PERMS = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
def set_not_writable(path):
perms = stat.S_IMODE(os.lstat(path).st_mode)
......
......@@ -25,6 +25,7 @@ from ZODB._compat import NAME_MAPPING
broken_cache = {}
@zope.interface.implementer(ZODB.interfaces.IBroken)
class Broken(object):
"""Broken object base class
......@@ -99,7 +100,6 @@ class Broken(object):
>>> broken_cache.clear()
"""
__Broken_state__ = __Broken_initargs__ = None
__name__ = 'broken object'
......@@ -131,6 +131,7 @@ class Broken(object):
def __setattr__(self, name, value):
raise BrokenModified("Can't change broken objects")
def find_global(modulename, globalname,
# These are *not* optimizations. Callers can override these.
Broken=Broken, type=type,
......@@ -220,6 +221,7 @@ def find_global(modulename, globalname,
broken_cache[(modulename, globalname)] = class_
return class_
def rebuild(modulename, globalname, *args):
"""Recreate a broken object, possibly recreating the missing class
......@@ -257,10 +259,12 @@ def rebuild(modulename, globalname, *args):
class_ = find_global(modulename, globalname)
return class_.__new__(class_, *args)
class BrokenModified(TypeError):
"""Attempt to modify a broken object
"""
class PersistentBroken(Broken, persistent.Persistent):
r"""Persistent broken objects
......@@ -347,6 +351,7 @@ class PersistentBroken(Broken, persistent.Persistent):
def __getnewargs__(self):
return self.__Broken_newargs__
def persistentBroken(class_):
try:
return class_.__dict__['__Broken_Persistent__']
......@@ -356,5 +361,5 @@ def persistentBroken(class_):
(PersistentBroken, class_),
{'__module__': class_.__module__},
)
)
)
return class_.__dict__['__Broken_Persistent__']
......@@ -29,18 +29,21 @@ _db_schema = None
s_schema_path = os.path.join(ZODB.__path__[0], "storage.xml")
_s_schema = None
def getDbSchema():
global _db_schema
if _db_schema is None:
_db_schema = ZConfig.loadSchema(db_schema_path)
return _db_schema
def getStorageSchema():
global _s_schema
if _s_schema is None:
_s_schema = ZConfig.loadSchema(s_schema_path)
return _s_schema
def databaseFromString(s):
"""Create a database from a database-configuration string.
......@@ -56,6 +59,7 @@ def databaseFromString(s):
"""
return databaseFromFile(StringIO(s))
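# Illustrative sketch (not part of this change): a minimal configuration
# string accepted by databaseFromString(); <mappingstorage> is assumed
# to be one of the storage sections registered in ZODB's schema.
def _example_in_memory_db():
    """Hedged example: build an in-memory database from a config string."""
    return databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>\n")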
def databaseFromFile(f):
"""Create a database from a file object that provides configuration.
......@@ -64,6 +68,7 @@ def databaseFromFile(f):
config, handle = ZConfig.loadConfigFile(getDbSchema(), f)
return databaseFromConfig(config.database)
def databaseFromURL(url):
"""Load a database from URL (or file name) that provides configuration.
......@@ -72,6 +77,7 @@ def databaseFromURL(url):
config, handler = ZConfig.loadConfig(getDbSchema(), url)
return databaseFromConfig(config.database)
def databaseFromConfig(database_factories):
databases = {}
first = None
......@@ -82,17 +88,20 @@ def databaseFromConfig(database_factories):
return first
def storageFromString(s):
"""Create a storage from a storage-configuration string.
"""
return storageFromFile(StringIO(s))
def storageFromFile(f):
"""Create a storage from a file object providing storage-configuration.
"""
config, handle = ZConfig.loadConfigFile(getStorageSchema(), f)
return storageFromConfig(config.storage)
def storageFromURL(url):
"""\
Create a storage from a URL (or file name) providing storage-configuration.
......@@ -100,9 +109,11 @@ def storageFromURL(url):
config, handler = ZConfig.loadConfig(getStorageSchema(), url)
return storageFromConfig(config.storage)
def storageFromConfig(section):
return section.open()
class BaseConfig(object):
"""Object representing a configured storage or database.
......@@ -124,6 +135,7 @@ class BaseConfig(object):
"""Open and return the storage object."""
raise NotImplementedError
class ZODBDatabase(BaseConfig):
def open(self, databases=None):
......@@ -150,21 +162,23 @@ class ZODBDatabase(BaseConfig):
cache_size_bytes=section.cache_size_bytes,
historical_pool_size=section.historical_pool_size,
historical_cache_size=section.historical_cache_size,
historical_cache_size_bytes=section.historical_cache_size_bytes,
historical_cache_size_bytes=section.historical_cache_size_bytes, # noqa: E501 line too long
historical_timeout=section.historical_timeout,
database_name=section.database_name or self.name or '',
databases=databases,
**options)
except:
except: # noqa: E722 do not use bare 'except'
storage.close()
raise
class MappingStorage(BaseConfig):
def open(self):
from ZODB.MappingStorage import MappingStorage
return MappingStorage(self.config.name)
class DemoStorage(BaseConfig):
def open(self):
......@@ -181,6 +195,7 @@ class DemoStorage(BaseConfig):
from ZODB.DemoStorage import DemoStorage
return DemoStorage(self.config.name, base=base, changes=changes)
class FileStorage(BaseConfig):
def open(self):
......@@ -206,6 +221,7 @@ class FileStorage(BaseConfig):
return FileStorage(config.path, **options)
class BlobStorage(BaseConfig):
def open(self):
......@@ -225,7 +241,8 @@ class ZEOClient(BaseConfig):
if self.config.blob_cache_size is not None:
options['blob_cache_size'] = self.config.blob_cache_size
if self.config.blob_cache_size_check is not None:
options['blob_cache_size_check'] = self.config.blob_cache_size_check
options['blob_cache_size_check'] = (
self.config.blob_cache_size_check)
if self.config.client_label is not None:
options['client_label'] = self.config.client_label
......@@ -249,6 +266,7 @@ class ZEOClient(BaseConfig):
realm=self.config.realm,
**options)
class BDBStorage(BaseConfig):
def open(self):
......@@ -261,12 +279,14 @@ class BDBStorage(BaseConfig):
setattr(bconf, name, getattr(self.config, name))
return storageclass(self.config.envdir, config=bconf)
class BDBMinimalStorage(BDBStorage):
def get_storageclass(self):
import BDBStorage.BDBMinimalStorage
return BDBStorage.BDBMinimalStorage.BDBMinimalStorage
class BDBFullStorage(BDBStorage):
def get_storageclass(self):
......
......@@ -14,21 +14,29 @@
import persistent.mapping
class fixer(object):
def __of__(self, parent):
def __setstate__(state, self=parent):
self._container=state
self._container = state
del self.__setstate__
return __setstate__
fixer=fixer()
class hack(object): pass
hack=hack()
fixer = fixer()
class hack(object):
pass
hack = hack()
def __basicnew__():
r=persistent.mapping.PersistentMapping()
r.__setstate__=fixer
r = persistent.mapping.PersistentMapping()
r.__setstate__ = fixer
return r
hack.__basicnew__=__basicnew__
hack.__basicnew__ = __basicnew__
......@@ -14,5 +14,5 @@
try:
from zope.event import notify
except ImportError:
notify = lambda event: None
def notify(event):
return None
......@@ -55,17 +55,21 @@ from ZODB._compat import _protocol
def num2str(n):
return struct.pack(">Q", n)[2:]
def str2num(s):
return struct.unpack(">Q", b"\000\000" + s)[0]
def prefix_plus_one(s):
num = str2num(s)
return num2str(num + 1)
def prefix_minus_one(s):
num = str2num(s)
return num2str(num - 1)
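# Illustrative sketch (not part of this change): fsIndex keys its outer
# OOBTree on the first 6 bytes of each 8-byte oid and keeps the last 2
# bytes in an fsBucket; num2str()/str2num() convert that 6-byte prefix
# to and from an integer so adjacent prefixes can be computed.
def _prefix_roundtrip_demo(n=258):
    """Hedged example of the 6-byte prefix encoding defined above."""
    s = num2str(n)                       # 6-byte big-endian encoding
    assert len(s) == 6
    assert str2num(s) == n
    assert str2num(prefix_plus_one(s)) == n + 1
    return s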
def ensure_bytes(s):
# on Python 3 we might pickle bytes and unpickle unicode strings
return s.encode('ascii') if not isinstance(s, bytes) else s
......@@ -80,11 +84,11 @@ class fsIndex(object):
def __getstate__(self):
return dict(
state_version = 1,
_data = [(k, v.toString())
for (k, v) in six.iteritems(self._data)
]
)
state_version=1,
_data=[(k, v.toString())
for (k, v) in six.iteritems(self._data)
]
)
def __setstate__(self, state):
version = state.pop('state_version', 0)
......@@ -96,13 +100,13 @@ class fsIndex(object):
self._data = OOBTree([
(ensure_bytes(k), v)
for (k, v) in self._data.items()
])
])
def _setstate_1(self, state):
self._data = OOBTree([
(ensure_bytes(k), fsBucket().fromString(ensure_bytes(v)))
for (k, v) in state['_data']
])
])
def __getitem__(self, key):
assert isinstance(key, bytes)
......@@ -246,7 +250,7 @@ class fsIndex(object):
else:
try:
smallest_suffix = tree.minKey(key[6:])
except ValueError: # 'empty tree' (no suffix >= arg)
except ValueError: # 'empty tree' (no suffix >= arg)
next_prefix = prefix_plus_one(smallest_prefix)
smallest_prefix = self._data.minKey(next_prefix)
tree = self._data[smallest_prefix]
......@@ -270,7 +274,7 @@ class fsIndex(object):
else:
try:
biggest_suffix = tree.maxKey(key[6:])
except ValueError: # 'empty tree' (no suffix <= arg)
except ValueError: # 'empty tree' (no suffix <= arg)
next_prefix = prefix_minus_one(biggest_prefix)
biggest_prefix = self._data.maxKey(next_prefix)
tree = self._data[biggest_prefix]
......
......@@ -94,12 +94,15 @@ def die(mess='', show_docstring=False):
print(__doc__ % sys.argv[0], file=sys.stderr)
sys.exit(1)
class ErrorFound(Exception):
pass
def error(mess, *args):
raise ErrorFound(mess % args)
def read_txn_header(f, pos, file_size, outp, ltid):
# Read the transaction record
f.seek(pos)
......@@ -107,7 +110,7 @@ def read_txn_header(f, pos, file_size, outp, ltid):
if len(h) < 23:
raise EOFError
tid, stl, status, ul, dl, el = unpack(">8s8scHHH",h)
tid, stl, status, ul, dl, el = unpack(">8s8scHHH", h)
status = as_text(status)
tl = u64(stl)
......@@ -157,6 +160,7 @@ def read_txn_header(f, pos, file_size, outp, ltid):
return pos, result, tid
def truncate(f, pos, file_size, outp):
"""Copy data from pos to end of f to a .trNNN file."""
......@@ -176,6 +180,7 @@ def truncate(f, pos, file_size, outp):
f.seek(pos)
tr.close()
def copy(src, dst, n):
while n:
buf = src.read(8096)
......@@ -186,6 +191,7 @@ def copy(src, dst, n):
dst.write(buf)
n -= len(buf)
def scan(f, pos):
"""Return a potential transaction location following pos in f.
......@@ -206,20 +212,21 @@ def scan(f, pos):
s = 0
while 1:
l = data.find(b".", s)
if l < 0:
l_ = data.find(b".", s)
if l_ < 0:
pos += len(data)
break
# If we are less than 8 bytes from the end of the
# string, we need to read more data.
s = l + 1
s = l_ + 1
if s > len(data) - 8:
pos += l
pos += l_
break
tl = u64(data[s:s+8])
if tl < pos:
return pos + s + 8
def iprogress(i):
if i % 2:
print(".", end=' ')
......@@ -227,10 +234,12 @@ def iprogress(i):
print((i/2) % 10, end=' ')
sys.stdout.flush()
def progress(p):
for i in range(p):
iprogress(i)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "fv:pP:")
......@@ -256,6 +265,7 @@ def main():
recover(inp, outp, verbose, partial, force, pack)
def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
print("Recovering", inp, "into", outp)
......@@ -266,7 +276,7 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
if f.read(4) != ZODB.FileStorage.packed_version:
die("input is not a file storage")
f.seek(0,2)
f.seek(0, 2)
file_size = f.tell()
ofs = ZODB.FileStorage.FileStorage(outp, create=1)
......@@ -332,11 +342,11 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
for r in txn:
if verbose > 1:
if r.data is None:
l = "bp"
l_ = "bp"
else:
l = len(r.data)
l_ = len(r.data)
print("%7d %s %s" % (u64(r.oid), l))
print("%7d %s" % (u64(r.oid), l_))
ofs.restore(r.oid, r.tid, r.data, '', r.data_txn,
txn)
nrec += 1
......@@ -370,7 +380,6 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
prog1 = prog1 + 1
iprogress(prog1)
bad = file_size - undone - ofs._pos
print("\n%s bytes removed during recovery" % bad)
......@@ -385,5 +394,6 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
ofs.close()
f.close()
if __name__ == "__main__":
main()
......@@ -100,6 +100,7 @@ class TxnHeader(object):
tlen = u64(self._file.read(8))
return TxnHeader(self._file, self._pos - (tlen + 8))
class DataHeader(object):
"""Object representing a data record header.
......@@ -111,7 +112,7 @@ class DataHeader(object):
txn_pos 24-32 position of txn header
version_len 32-34 length of version (always 0)
data_len 34-42 length of data
"""
def __init__(self, file, pos):
......@@ -129,15 +130,16 @@ class DataHeader(object):
self.prev_rec_pos = u64(prev_rec_pos)
self.txn_pos = u64(txn_pos)
self.data_len = u64(data_len)
def next_offset(self):
"""Return offset of next record."""
off = self._pos + self.data_len
off += DATA_HDR_LEN
if self.data_len == 0:
off += 8 # backpointer
off += 8 # backpointer
return off
def prev_txn(f):
"""Return transaction located before current file position."""
f.seek(-8, 1)
......
......@@ -267,6 +267,7 @@ class IConnection(Interface):
separate object.
"""
class IStorageWrapper(Interface):
"""Storage wrapper interface
......@@ -296,7 +297,7 @@ class IStorageWrapper(Interface):
This interface may be implemented by storage adapters or other
intermediaries. For example, a storage adapter that provides
encryption and/or compresssion will apply record transformations
encryption and/or compression will apply record transformations
in its references method.
"""
......@@ -343,7 +344,8 @@ class IStorageWrapper(Interface):
"""Return untransformed data
"""
IStorageDB = IStorageWrapper # for backward compatibility
IStorageDB = IStorageWrapper # for backward compatibility
class IDatabase(IStorageDB):
......@@ -371,7 +373,6 @@ class IDatabase(IStorageDB):
this attribute.
""")
def open(transaction_manager=None, serial=''):
"""Return an IConnection object for use by application code.
......@@ -421,7 +422,6 @@ class IDatabase(IStorageDB):
also included if they don't conflict with the keys above.
"""
def pack(t=None, days=0):
"""Pack the storage, deleting unused object revisions.
......@@ -433,7 +433,7 @@ class IDatabase(IStorageDB):
usually an expensive operation.
There are two optional arguments that can be used to set the
pack time: t, pack time in seconds since the epcoh, and days,
pack time: t, pack time in seconds since the epoch, and days,
the number of days to subtract from t or from the current
time if t is not specified.
"""
......@@ -539,6 +539,7 @@ class IDatabase(IStorageDB):
should also close all the Connections.
"""
class IStorageTransactionMetaData(Interface):
"""Provide storage transaction meta data.
......@@ -628,13 +629,13 @@ class IStorage(Interface):
The format and interpretation of this name is storage
dependent. It could be a file name, a database name, etc.
This is used soley for informational purposes.
This is used solely for informational purposes.
"""
def getSize():
"""An approximate size of the database, in bytes.
This is used soley for informational purposes.
This is used solely for informational purposes.
"""
def history(oid, size=1):
......@@ -660,7 +661,7 @@ class IStorage(Interface):
user_name
The bytes user identifier, if any (or an empty string) of the
user on whos behalf the revision was committed.
user on whose behalf the revision was committed.
description
The bytes transaction description for the transaction that
......@@ -704,7 +705,7 @@ class IStorage(Interface):
def __len__():
"""The approximate number of objects in the storage
This is used soley for informational purposes.
This is used solely for informational purposes.
"""
def loadBefore(oid, tid):
......@@ -821,7 +822,7 @@ class IStorage(Interface):
This call is ignored if the storage is not participating
two-phase commit or if the given transaction is not the same
as the transaction the storage is commiting.
as the transaction the storage is committing.
"""
def tpc_begin(transaction):
......@@ -837,7 +838,7 @@ class IStorage(Interface):
current transaction ends (commits or aborts).
"""
def tpc_finish(transaction, func = lambda tid: None):
def tpc_finish(transaction, func=lambda tid: None):
"""Finish the transaction, making any transaction changes permanent.
Changes must be made permanent at this point.
......@@ -863,7 +864,7 @@ class IStorage(Interface):
The argument is the same object passed to tpc_begin.
This call raises a StorageTransactionError if the storage
isn't participating in two-phase commit or if it is commiting
isn't participating in two-phase commit or if it is committing
a different transaction.
If a transaction can be committed by a storage, then the
......@@ -901,7 +902,7 @@ class IMultiCommitStorage(IStorage):
the return value is always None.
"""
def tpc_finish(transaction, func = lambda tid: None):
def tpc_finish(transaction, func=lambda tid: None):
"""Finish the transaction, making any transaction changes permanent.
See IStorage.store. For objects implementing this interface,
......@@ -954,7 +955,6 @@ class IStorageRestoreable(IStorage):
# including the existing FileStorage implementation), that
# failed to take into account records after the pack time.
def restore(oid, serial, data, version, prev_txn, transaction):
"""Write data already committed in a separate database
......@@ -996,6 +996,7 @@ class IStorageRecordInformation(Interface):
data = Attribute("The data record, bytes")
data_txn = Attribute("The previous transaction id, bytes")
class IStorageTransactionInformation(IStorageTransactionMetaData):
"""Provide information about a storage transaction.
......@@ -1003,7 +1004,7 @@ class IStorageTransactionInformation(IStorageTransactionMetaData):
Note that this may contain a status field used by FileStorage to
support packing. At some point, this will go away when FileStorage
has a better pack algoritm.
has a better pack algorithm.
"""
tid = Attribute("Transaction id")
......@@ -1034,6 +1035,7 @@ class IStorageIteration(Interface):
"""
class IStorageUndoable(IStorage):
"""A storage supporting transactional undo.
"""
......@@ -1245,6 +1247,7 @@ class IMVCCStorage(IStorage):
A POSKeyError is raised if there is no record for the object id.
"""
class IMVCCPrefetchStorage(IMVCCStorage):
def prefetch(oids):
......@@ -1254,6 +1257,7 @@ class IMVCCPrefetchStorage(IMVCCStorage):
more than once.
"""
class IMVCCAfterCompletionStorage(IMVCCStorage):
def afterCompletion():
......@@ -1264,6 +1268,7 @@ class IMVCCAfterCompletionStorage(IMVCCStorage):
See ``transaction.interfaces.ISynchronizer.afterCompletion``.
"""
class IStorageCurrentRecordIteration(IStorage):
def record_iternext(next=None):
......@@ -1271,6 +1276,7 @@ class IStorageCurrentRecordIteration(IStorage):
Use like this:
>>> storage = ...
>>> next = None
>>> while 1:
... oid, tid, data, next = storage.record_iternext(next)
......@@ -1280,24 +1286,26 @@ class IStorageCurrentRecordIteration(IStorage):
"""
class IExternalGC(IStorage):
def deleteObject(oid, serial, transaction):
"""Mark an object as deleted
def deleteObject(oid, serial, transaction):
"""Mark an object as deleted
This method marks an object as deleted via a new object
revision. Subsequent attempts to load current data for the
object will fail with a POSKeyError, but loads for
non-current data will suceed if there are previous
non-delete records. The object will be removed from the
storage when all not-delete records are removed.
This method marks an object as deleted via a new object
revision. Subsequent attempts to load current data for the
object will fail with a POSKeyError, but loads for
non-current data will succeed if there are previous
non-delete records. The object will be removed from the
storage when all not-delete records are removed.
The serial argument must match the most recently committed
serial for the object. This is a seat belt.
The serial argument must match the most recently committed
serial for the object. This is a seat belt.
This method can only be called in the first phase of 2-phase
commit.
"""
This method can only be called in the first phase of 2-phase
commit.
"""
class ReadVerifyingStorage(IStorage):
......@@ -1315,6 +1323,7 @@ class ReadVerifyingStorage(IStorage):
through the end of the transaction.
"""
class IBlob(Interface):
"""A BLOB supports efficient handling of large data within ZODB."""
......@@ -1325,7 +1334,7 @@ class IBlob(Interface):
mode: Mode to open the file with. Possible values: r,w,r+,a,c
The mode 'c' is similar to 'r', except that an orinary file
The mode 'c' is similar to 'r', except that an ordinary file
object is returned and may be used in a separate transaction
and after the blob's database connection has been closed.
......@@ -1335,8 +1344,8 @@ class IBlob(Interface):
"""Return a file name for committed data.
The returned file name may be opened for reading or handed to
other processes for reading. The file name isn't guarenteed
to be valid indefinately. The file may be removed in the
other processes for reading. The file name isn't guaranteed
to be valid indefinitely. The file may be removed in the
future as a result of garbage collection depending on system
configuration.
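# Editor's illustrative sketch (not part of this diff): the modes described
# above in practice, assuming `blob` is a ZODB.blob.Blob reachable from an
# open connection and `transaction` is the usual transaction package.
import transaction

with blob.open('w') as f:                 # write within the current transaction
    f.write(b'a large payload')
transaction.commit()
with open(blob.committed(), 'rb') as f:   # plain file name for committed data
    payload = f.read()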
......@@ -1412,6 +1421,7 @@ class IBlobStorage(Interface):
If Blobs use this, then commits can be performed with a simple rename.
"""
class IBlobStorageRestoreable(IBlobStorage, IStorageRestoreable):
def restoreBlob(oid, serial, data, blobfilename, prev_txn, transaction):
......@@ -1446,6 +1456,7 @@ class IBroken(Interface):
__Broken_initargs__ = Attribute("Arguments passed to __init__.")
__Broken_state__ = Attribute("Value passed to __setstate__.")
class BlobError(Exception):
pass
......
......@@ -12,6 +12,7 @@ import zope.interface
from . import interfaces, serialize, POSException
from .utils import p64, u64, Lock, oid_repr, tid_repr
class Base(object):
_copy_methods = (
......@@ -19,7 +20,7 @@ class Base(object):
'loadBlob', 'openCommittedBlobFile',
'isReadOnly', 'supportsUndo', 'undoLog', 'undoInfo',
'temporaryDirectory',
)
)
def __init__(self, storage):
self._storage = storage
......@@ -37,6 +38,7 @@ class Base(object):
def __len__(self):
return len(self._storage)
class MVCCAdapter(Base):
def __init__(self, storage):
......@@ -63,6 +65,7 @@ class MVCCAdapter(Base):
self._instances.remove(instance)
closed = False
def close(self):
if not self.closed:
self.closed = True
......@@ -92,14 +95,15 @@ class MVCCAdapter(Base):
def pack(self, pack_time, referencesf):
return self._storage.pack(pack_time, referencesf)
class MVCCAdapterInstance(Base):
_copy_methods = Base._copy_methods + (
'loadSerial', 'new_oid', 'tpc_vote',
'checkCurrentSerialInTransaction', 'tpc_abort',
)
)
_start = None # Transaction start time
_start = None # Transaction start time
_ltid = b'' # Last storage transaction id
def __init__(self, base):
......@@ -107,7 +111,7 @@ class MVCCAdapterInstance(Base):
Base.__init__(self, base._storage)
self._lock = Lock()
self._invalidations = set()
self._sync = getattr(self._storage, 'sync', lambda : None)
self._sync = getattr(self._storage, 'sync', lambda: None)
def release(self):
self._base._release(self)
......@@ -175,8 +179,8 @@ class MVCCAdapterInstance(Base):
# into account, and raise ReadConflictError only in the presence of
# database being simultaneously updated from back of its log.
raise POSException.ReadConflictError(
"load %s @%s: object deleted, likely by simultaneous pack" %
(oid_repr(oid), tid_repr(p64(u64(self._start) - 1))))
"load %s @%s: object deleted, likely by simultaneous pack" %
(oid_repr(oid), tid_repr(p64(u64(self._start) - 1))))
return r[:2]
......@@ -189,8 +193,8 @@ class MVCCAdapterInstance(Base):
else:
raise
_modified = None # Used to keep track of oids modified within a
# transaction, so we can invalidate them later.
_modified = None # Used to keep track of oids modified within a
# transaction, so we can invalidate them later.
def tpc_begin(self, transaction):
self._storage.tpc_begin(transaction)
......@@ -205,7 +209,7 @@ class MVCCAdapterInstance(Base):
oid, serial, data, blobfilename, '', transaction)
self._modified.add(oid)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
modified = self._modified
self._modified = None
......@@ -216,9 +220,11 @@ class MVCCAdapterInstance(Base):
return self._storage.tpc_finish(transaction, invalidate_finish)
def read_only_writer(self, *a, **kw):
raise POSException.ReadOnlyError
class HistoricalStorageAdapter(Base):
"""Adapt a storage to a historical storage
"""
......@@ -226,7 +232,7 @@ class HistoricalStorageAdapter(Base):
_copy_methods = Base._copy_methods + (
'loadSerial', 'tpc_begin', 'tpc_finish', 'tpc_abort', 'tpc_vote',
'checkCurrentSerialInTransaction',
)
)
def __init__(self, storage, before=None):
Base.__init__(self, storage)
......@@ -267,7 +273,7 @@ class UndoAdapterInstance(Base):
_copy_methods = Base._copy_methods + (
'tpc_abort',
)
)
def __init__(self, base):
self._base = base
......@@ -293,7 +299,7 @@ class UndoAdapterInstance(Base):
if result:
self._undone.update(result)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
def invalidate_finish(tid):
self._base._invalidate_finish(tid, self._undone, None)
......
......@@ -63,6 +63,7 @@ class _p_DataDescr(object):
def __delete__(self, inst):
raise AttributeError(self.__name__)
class _p_oid_or_jar_Descr(_p_DataDescr):
# Special descr for _p_oid and _p_jar that loads
# state when set if both are set and _p_changed is None
......@@ -78,11 +79,11 @@ class _p_oid_or_jar_Descr(_p_DataDescr):
jar = get('_p_jar')
if (jar is not None
and get('_p_oid') is not None
and get('_p_changed') is None
):
and get('_p_oid') is not None
and get('_p_changed') is None):
jar.setstate(inst)
class _p_ChangedDescr(object):
# descriptor to handle special weird semantics of _p_changed
......@@ -99,6 +100,7 @@ class _p_ChangedDescr(object):
def __delete__(self, inst):
inst._p_invalidate()
class _p_MethodDescr(object):
"""Provide unassignable class attributes
"""
......@@ -120,6 +122,7 @@ class _p_MethodDescr(object):
special_class_descrs = '__dict__', '__weakref__'
class PersistentMetaClass(type):
_p_jar = _p_oid_or_jar_Descr('_p_jar')
......@@ -148,8 +151,8 @@ class PersistentMetaClass(type):
and
(get('_p_oid') is not None)
and
(get('_p_changed') == False)
):
(get('_p_changed') is False)
):
self._p_changed = True
data_manager.register(self)
......@@ -177,7 +180,6 @@ class PersistentMetaClass(type):
_p_invalidate = _p_MethodDescr(_p_invalidate)
def __getstate__(self):
return (self.__bases__,
dict([(k, v) for (k, v) in self.__dict__.items()
......@@ -185,7 +187,7 @@ class PersistentMetaClass(type):
or k.startswith('_v_')
or k in special_class_descrs
)
]),
]),
)
__getstate__ = _p_MethodDescr(__getstate__)
......
......@@ -9,7 +9,6 @@ from ZODB.FileStorage import FileStorage
from ZODB._compat import PersistentUnpickler, BytesIO
class FakeError(Exception):
def __init__(self, module, name):
Exception.__init__(self)
......@@ -41,20 +40,22 @@ class Report(object):
self.FOIDS = 0
self.FBYTES = 0
def shorten(s, n):
l = len(s)
if l <= n:
length = len(s)
if length <= n:
return s
while len(s) + 3 > n: # account for ...
while len(s) + 3 > n: # account for ...
i = s.find(".")
if i == -1:
# In the worst case, just return the rightmost n bytes
return s[-n:]
else:
s = s[i + 1:]
l = len(s)
length = len(s)
return "..." + s
def report(rep):
print("Processed %d records in %d transactions" % (rep.OIDS, rep.TIDS))
print("Average record size is %7.2f bytes" % (rep.DBYTES * 1.0 / rep.OIDS))
......@@ -63,8 +64,8 @@ def report(rep):
print("Types used:")
fmt = "%-46s %7s %9s %6s %7s"
fmtp = "%-46s %7d %9d %5.1f%% %7.2f" # per-class format
fmts = "%46s %7d %8dk %5.1f%% %7.2f" # summary format
fmtp = "%-46s %7d %9d %5.1f%% %7.2f" # per-class format
fmts = "%46s %7d %8dk %5.1f%% %7.2f" # summary format
print(fmt % ("Class Name", "Count", "TBytes", "Pct", "AvgSize"))
print(fmt % ('-'*46, '-'*7, '-'*9, '-'*5, '-'*7))
typemap = sorted(rep.TYPEMAP)
......@@ -76,8 +77,9 @@ def report(rep):
pct, rep.TYPESIZE[t] * 1.0 / rep.TYPEMAP[t]))
print(fmt % ('='*46, '='*7, '='*9, '='*5, '='*7))
print("%46s %7d %9s %6s %6.2fk" % ('Total Transactions', rep.TIDS, ' ',
' ', rep.DBYTES * 1.0 / rep.TIDS / 1024.0))
print("%46s %7d %9s %6s %6.2fk" % (
'Total Transactions', rep.TIDS, ' ', ' ',
rep.DBYTES * 1.0 / rep.TIDS / 1024.0))
print(fmts % ('Total Records', rep.OIDS, rep.DBYTES / 1024.0, cumpct,
rep.DBYTES * 1.0 / rep.OIDS))
......@@ -89,6 +91,7 @@ def report(rep):
rep.FBYTES * 100.0 / rep.DBYTES,
rep.FBYTES * 1.0 / rep.FOIDS))
def analyze(path):
fs = FileStorage(path, read_only=1)
fsi = fs.iterator()
......@@ -97,11 +100,13 @@ def analyze(path):
analyze_trans(report, txn)
return report
def analyze_trans(report, txn):
report.TIDS += 1
for rec in txn:
analyze_rec(report, rec)
def get_type(record):
try:
unpickled = FakeUnpickler(BytesIO(record.data)).load()
......@@ -114,6 +119,7 @@ def get_type(record):
else:
return str(classinfo)
def analyze_rec(report, record):
oid = record.oid
report.OIDS += 1
......@@ -121,7 +127,7 @@ def analyze_rec(report, record):
# No pickle -- aborted version or undo of object creation.
return
try:
size = len(record.data) # Ignores various overhead
size = len(record.data) # Ignores various overhead
report.DBYTES += size
if oid not in report.OIDMAP:
type = get_type(record)
......@@ -142,6 +148,7 @@ def analyze_rec(report, record):
except Exception as err:
print(err)
if __name__ == "__main__":
path = sys.argv[1]
report(analyze(path))
......@@ -19,18 +19,21 @@ oids_seen = {}
# Append (obj, path) to L if and only if obj is a persistent object
# and we haven't seen it before.
def add_if_new_persistent(L, obj, path):
global oids_seen
getattr(obj, '_', None) # unghostify
getattr(obj, '_', None) # unghostify
if hasattr(obj, '_p_oid'):
oid = obj._p_oid
if oid not in oids_seen:
L.append((obj, path))
oids_seen[oid] = 1
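# Editor's illustrative sketch (not part of this diff): a simple graph walk
# built from the two helpers above, assuming `root` is a loaded persistent
# object (e.g. a ZODB root mapping); todo grows while it is being iterated.
todo = []
add_if_new_persistent(todo, root, '')
for obj, path in todo:
    for name, sub in get_subobjects(obj):
        add_if_new_persistent(todo, sub, path + name)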
def get_subobjects(obj):
getattr(obj, '_', None) # unghostify
getattr(obj, '_', None) # unghostify
sub = []
try:
attrs = obj.__dict__.items()
......@@ -55,22 +58,23 @@ def get_subobjects(obj):
while 1:
try:
elt = obj[i]
except:
except: # noqa: E722 do not use bare 'except'
break
sub.append(("[%d]" % i, elt))
i += 1
return sub
def main(fname=None):
if fname is None:
import sys
try:
fname, = sys.argv[1:]
except:
except: # noqa: E722 do not use bare 'except'
print(__doc__)
sys.exit(2)
fs = FileStorage(fname, read_only=1)
cn = ZODB.DB(fs).open()
rt = cn.root()
......@@ -116,5 +120,6 @@ def main(fname=None):
print("total", len(fs._index), "found", found)
if __name__ == "__main__":
main()
......@@ -43,9 +43,11 @@ import sys
from ZODB.FileStorage.fsoids import Tracer
def usage():
print(__doc__)
def main():
import getopt
......@@ -64,7 +66,7 @@ def main():
c = Tracer(args[0])
for oid in args[1:]:
as_int = int(oid, 0) # 0 == auto-detect base
as_int = int(oid, 0) # 0 == auto-detect base
c.register_oids(as_int)
if path is not None:
for line in open(path):
......@@ -75,5 +77,6 @@ def main():
c.run()
c.report()
if __name__ == "__main__":
main()
......@@ -74,6 +74,8 @@ from BTrees.QQBTree import QQBTree
# There's a problem with oid. 'data' is its pickle, and 'serial' its
# serial number. 'missing' is a list of (oid, class, reason) triples,
# explaining what the problem(s) is(are).
def report(oid, data, serial, missing):
from_mod, from_class = get_pickle_metadata(data)
if len(missing) > 1:
......@@ -92,6 +94,7 @@ def report(oid, data, serial, missing):
print("\toid %s %s: %r" % (oid_repr(oid), reason, description))
print()
def main(path=None):
verbose = 0
if path is None:
......@@ -105,7 +108,6 @@ def main(path=None):
path, = args
fs = FileStorage(path, read_only=1)
# Set of oids in the index that failed to load due to POSKeyError.
......@@ -122,7 +124,7 @@ def main(path=None):
# build {pos -> oid} index that is reverse to {oid -> pos} fs._index
# we'll need this to iterate objects in order of ascending file position to
# optimize disk IO.
pos2oid = QQBTree() # pos -> u64(oid)
pos2oid = QQBTree() # pos -> u64(oid)
for oid, pos in fs._index.iteritems():
pos2oid[pos] = u64(oid)
......@@ -137,14 +139,14 @@ def main(path=None):
raise
except POSKeyError:
undone[oid] = 1
except:
except: # noqa: E722 do not use bare 'except'
if verbose:
traceback.print_exc()
noload[oid] = 1
# pass 2: go through all objects again and verify that their references do
# not point to problematic object set. Iterate objects in order of ascending
# file position to optimize disk IO.
# not point to problematic object set. Iterate objects in order of
# ascending file position to optimize disk IO.
inactive = noload.copy()
inactive.update(undone)
for oid64 in pos2oid.itervalues():
......@@ -153,7 +155,7 @@ def main(path=None):
continue
data, serial = load_current(fs, oid)
refs = get_refs(data)
missing = [] # contains 3-tuples of oid, klass-metadata, reason
missing = [] # contains 3-tuples of oid, klass-metadata, reason
for ref, klass in refs:
if klass is None:
klass = '<unknown>'
......@@ -166,5 +168,6 @@ def main(path=None):
if missing:
report(oid, data, serial, missing)
if __name__ == "__main__":
main()
......@@ -9,6 +9,7 @@ from six.moves import filter
rx_txn = re.compile(r"tid=([0-9a-f]+).*size=(\d+)")
rx_data = re.compile(r"oid=([0-9a-f]+) size=(\d+) class=(\S+)")
def sort_byhsize(seq, reverse=False):
L = [(v.size(), k, v) for k, v in seq]
L.sort()
......@@ -16,6 +17,7 @@ def sort_byhsize(seq, reverse=False):
L.reverse()
return [(k, v) for n, k, v in L]
class Histogram(dict):
def add(self, size):
......@@ -93,6 +95,7 @@ class Histogram(dict):
i * binsize, n, p, pc, "*" * (n // dot)))
print()
def class_detail(class_size):
# summary of classes
fmt = "%5s %6s %6s %6s %-50.50s"
......@@ -110,6 +113,7 @@ def class_detail(class_size):
continue
h.report("Object size for %s" % klass, usebins=True)
def revision_detail(lifetimes, classes):
# Report per-class details for any object modified more than once
for name, oids in six.iteritems(classes):
......@@ -124,17 +128,18 @@ def revision_detail(lifetimes, classes):
if keep:
h.report("Number of revisions for %s" % name, binsize=10)
def main(path=None):
if path is None:
path = sys.argv[1]
txn_objects = Histogram() # histogram of txn size in objects
txn_bytes = Histogram() # histogram of txn size in bytes
obj_size = Histogram() # histogram of object size
n_updates = Histogram() # oid -> num updates
n_classes = Histogram() # class -> num objects
lifetimes = {} # oid -> list of tids
class_size = {} # class -> histogram of object size
classes = {} # class -> list of oids
txn_objects = Histogram() # histogram of txn size in objects
txn_bytes = Histogram() # histogram of txn size in bytes
obj_size = Histogram() # histogram of object size
n_updates = Histogram() # oid -> num updates
n_classes = Histogram() # class -> num objects
lifetimes = {} # oid -> list of tids
class_size = {} # class -> histogram of object size
classes = {} # class -> list of oids
MAX = 0
objects = 0
......@@ -203,5 +208,6 @@ def main(path=None):
class_detail(class_size)
if __name__ == "__main__":
main()
......@@ -25,6 +25,7 @@ try:
except ImportError:
from sha import sha as sha1
def main(path, ntxn):
with open(path, "rb") as f:
f.seek(0, 2)
......@@ -32,7 +33,6 @@ def main(path, ntxn):
i = ntxn
while th and i > 0:
hash = sha1(th.get_raw_data()).digest()
l = len(str(th.get_timestamp())) + 1
th.read_meta()
print("%s: hash=%s" % (th.get_timestamp(),
binascii.hexlify(hash).decode()))
......@@ -42,6 +42,7 @@ def main(path, ntxn):
th = th.prev_txn()
i -= 1
def Main():
ntxn = 10
opts, args = getopt.getopt(sys.argv[1:], "n:")
......@@ -51,5 +52,6 @@ def Main():
ntxn = int(v)
main(path, ntxn)
if __name__ == "__main__":
Main()
......@@ -41,13 +41,16 @@ import struct
import sys
from ZODB._compat import FILESTORAGE_MAGIC
class FormatError(ValueError):
"""There is a problem with the format of the FileStorage."""
class Status(object):
checkpoint = b'c'
undone = b'u'
packed_version = FILESTORAGE_MAGIC
TREC_HDR_LEN = 23
......@@ -55,6 +58,7 @@ DREC_HDR_LEN = 42
VERBOSE = 0
def hexify(s):
r"""Format an 8-bit string as hex
......@@ -64,17 +68,20 @@ def hexify(s):
"""
return '0x' + binascii.hexlify(s).decode()
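# Editor's illustrative sketch (not part of this diff):
assert hexify(b'\x00\x01') == '0x0001'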
def chatter(msg, level=1):
if VERBOSE >= level:
sys.stdout.write(msg)
def U64(v):
"""Unpack an 8-byte string as a 64-bit long"""
h, l = struct.unpack(">II", v)
h, l_ = struct.unpack(">II", v)
if h:
return (h << 32) + l
return (h << 32) + l_
else:
return l
return l_
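# Editor's illustrative sketch (not part of this diff): U64() unpacks an
# 8-byte big-endian value, the inverse of the p64 packing used for oids/tids.
assert U64(b'\x00' * 7 + b'\x01') == 1
assert U64(b'\x00\x00\x00\x01\x00\x00\x00\x00') == 1 << 32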
def check(path):
with open(path, 'rb') as file:
......@@ -87,7 +94,7 @@ def check(path):
raise FormatError("invalid file header")
pos = 4
tid = b'\000' * 8 # lowest possible tid to start
tid = b'\000' * 8 # lowest possible tid to start
i = 0
while pos:
_pos = pos
......@@ -106,7 +113,7 @@ def check_trec(path, file, pos, ltid, file_size):
used for generating error messages.
"""
h = file.read(TREC_HDR_LEN) #XXX must be bytes under Py3k
h = file.read(TREC_HDR_LEN) # XXX must be bytes under Py3k
if not h:
return None, None
if len(h) != TREC_HDR_LEN:
......@@ -120,7 +127,7 @@ def check_trec(path, file, pos, ltid, file_size):
(path, pos, hexify(tid), hexify(ltid)))
ltid = tid
tl = U64(stl) # transaction record length - 8
tl = U64(stl) # transaction record length - 8
if pos + tl + 8 > file_size:
raise FormatError("%s truncated possibly because of"
" damaged records at %s" % (path, pos))
......@@ -140,7 +147,7 @@ def check_trec(path, file, pos, ltid, file_size):
if status != Status.undone:
pos = tpos + tmeta_len
file.read(ul + dl + el) # skip transaction metadata
file.read(ul + dl + el) # skip transaction metadata
i = 0
while pos < tend:
......@@ -162,6 +169,7 @@ def check_trec(path, file, pos, ltid, file_size):
pos = tend + 8
return pos, tid
def check_drec(path, file, pos, tpos, tid):
"""Check a data record for the current transaction record"""
......@@ -170,7 +178,7 @@ def check_drec(path, file, pos, tpos, tid):
raise FormatError("%s truncated at %s" % (path, pos))
oid, serial, _prev, _tloc, vlen, _plen = (
struct.unpack(">8s8s8s8sH8s", h))
prev = U64(_prev)
U64(_prev)
tloc = U64(_tloc)
plen = U64(_plen)
dlen = DREC_HDR_LEN + (plen or 8)
......@@ -178,8 +186,8 @@ def check_drec(path, file, pos, tpos, tid):
if vlen:
dlen = dlen + 16 + vlen
file.seek(8, 1)
pv = U64(file.read(8))
file.seek(vlen, 1) # skip the version data
U64(file.read(8))
file.seek(vlen, 1) # skip the version data
if tloc != tpos:
raise FormatError("%s data record exceeds transaction record "
......@@ -195,9 +203,11 @@ def check_drec(path, file, pos, tpos, tid):
return pos, oid
def usage():
sys.exit(__doc__)
def main(args=None):
if args is None:
args = sys.argv[1:]
......@@ -221,5 +231,6 @@ def main(args=None):
chatter("no errors detected")
if __name__ == "__main__":
main()
......@@ -6,12 +6,12 @@ Note: To run this test script fstest.py must be on your PYTHONPATH.
from cStringIO import StringIO
import re
import struct
import unittest
import ZODB.tests.util
import fstest
from fstest import FormatError, U64
class TestCorruptedFS(ZODB.tests.util.TestCase):
f = open('test-checker.fs', 'rb')
......@@ -117,7 +117,7 @@ class TestCorruptedFS(ZODB.tests.util.TestCase):
self._file.write(data)
buf = self._datafs.read(tl - 8)
self._file.write(buf[0])
assert tl <= 1<<16, "can't use this transaction for this test"
assert tl <= 1 << 16, "can't use this transaction for this test"
self._file.write("\777\777")
self._file.write(buf[3:])
self.detectsError("invalid transaction header")
......@@ -172,6 +172,3 @@ class TestCorruptedFS(ZODB.tests.util.TestCase):
self._file.write("\000" * 4 + "\077" + "\000" * 3)
self._file.write(data[32:])
self.detectsError("record exceeds transaction")
if __name__ == "__main__":
unittest.main()
......@@ -82,7 +82,7 @@ import profile
from persistent.timestamp import TimeStamp
from ZODB import utils
from ZODB import StorageTypes # XXX: This import does not exist
from ZODB import StorageTypes # XXX: This import does not exist
PROGRAM = sys.argv[0]
......@@ -130,7 +130,7 @@ def main():
elif opt in ('-v', '--verbose'):
options.verbose += 1
elif opt in ('-T', '--storage_types'):
print_types()
print('Unknown option.')
sys.exit(0)
elif opt in ('-S', '--stype'):
options.stype = arg
......@@ -247,16 +247,16 @@ def doit(srcdb, dstdb, options):
t = TimeStamp(tid)
if t <= ts:
if ok:
print((
'Time stamps are out of order %s, %s' % (ts, t)), file=sys.stderr)
print('Time stamps are out of order %s, %s' % (ts, t),
file=sys.stderr)
ok = False
ts = t.laterThan(ts)
tid = ts.raw()
else:
ts = t
if not ok:
print((
'Time stamps are back in order %s' % t), file=sys.stderr)
print('Time stamps are back in order %s' % t,
file=sys.stderr)
ok = True
if verbose > 1:
print(ts)
......@@ -310,7 +310,7 @@ def doit(srcdb, dstdb, options):
tidstr = utils.U64(tid)
format = "%4d. %20s %6d %8d %6.4f %6.4f %6.4f %6.4f %6.4f"
print(format % (skipper, tidstr, objects, size,
t4-t0, t1-t0, t2-t1, t3-t2, t4-t3), file=outfp)
t4-t0, t1-t0, t2-t1, t3-t2, t4-t3), file=outfp)
total_pickle_size += size
total_object_count += objects
......
......@@ -23,17 +23,17 @@ from ZODB.blob import FilesystemHelper
from ZODB.utils import oid_repr
def link_or_copy(f1, f2):
try:
os.link(f1, f2)
except OSError:
shutil.copy(f1, f2)
# Check if we actually have link
try:
os.link
except AttributeError:
link_or_copy = shutil.copy
else:
def link_or_copy(f1, f2):
try:
os.link(f1, f2)
except OSError:
shutil.copy(f1, f2)
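# Editor's illustrative sketch (not part of this diff): link_or_copy() prefers
# a cheap hard link and falls back to a real copy (e.g. across filesystems or
# where os.link is unavailable); the paths below are made up.
link_or_copy('var/blobs-old/0x01/x.blob', 'var/blobs-new/0x01/x.blob')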
def migrate(source, dest, layout):
......
......@@ -13,6 +13,7 @@ from ZODB.utils import U64, get_pickle_metadata, load_current
from ZODB.serialize import referencesf
from six.moves import filter
def find_paths(root, maxdist):
"""Find Python attribute traversal paths for objects to maxdist distance.
......@@ -37,7 +38,7 @@ def find_paths(root, maxdist):
if oid is not None:
paths[oid] = path
if dist < maxdist:
getattr(obj, 'foo', None) # unghostify
getattr(obj, 'foo', None) # unghostify
try:
items = obj.__dict__.items()
except AttributeError:
......@@ -48,6 +49,7 @@ def find_paths(root, maxdist):
return paths
def main(path):
fs = FileStorage(path, read_only=1)
if PACK:
......@@ -60,6 +62,7 @@ def main(path):
def total_size(oid):
cache = {}
cache_size = 1000
def _total_size(oid, seen):
v = cache.get(oid)
if v is not None:
......@@ -91,10 +94,11 @@ def main(path):
for oid in keys:
data, serialno = load_current(fs, oid)
mod, klass = get_pickle_metadata(data)
refs = referencesf(data)
referencesf(data)
path = paths.get(oid, '-')
print(fmt % (U64(oid), len(data), total_size(oid), path, mod, klass))
def Main():
import sys
import getopt
......@@ -122,5 +126,6 @@ def Main():
VERBOSE += 1
main(path)
if __name__ == "__main__":
Main()
......@@ -18,6 +18,7 @@ $Id$
from ZODB.serialize import referencesf
def referrers(storage):
result = {}
for transaction in storage.iterator():
......
......@@ -85,6 +85,7 @@ Options for -V/--verify:
Verify file sizes only (skip md5 checksums).
"""
from __future__ import print_function
import re
import os
import shutil
import sys
......@@ -176,7 +177,7 @@ def parseargs(argv):
'date=',
'output=',
'with-verification',
])
])
except getopt.error as msg:
usage(1, msg)
......@@ -299,6 +300,8 @@ def fsync(afile):
# Return the total number of bytes read == the total number of bytes
# passed in all to func(). Leaves the file position just after the
# last byte read.
def dofile(func, fp, n=None):
bytesread = 0
while n is None or n > 0:
......@@ -320,6 +323,7 @@ def dofile(func, fp, n=None):
def checksum(fp, n):
# Checksum the first n bytes of the specified file
sum = md5()
def func(data):
sum.update(data)
dofile(func, fp, n)
......@@ -336,6 +340,7 @@ def file_size(fp):
def checksum_and_size(fp):
# Checksum and return it with the size of the file
sum = md5()
def func(data):
sum.update(data)
size = dofile(func, fp, None)
......@@ -374,6 +379,7 @@ def concat(files, ofp=None):
# given. Return the number of bytes written and the md5 checksum of the
# bytes.
sum = md5()
def func(data):
sum.update(data)
if ofp:
......@@ -393,6 +399,7 @@ def concat(files, ofp=None):
def gen_filedate(options):
return getattr(options, 'test_now', time.gmtime()[:6])
def gen_filename(options, ext=None, now=None):
if ext is None:
if options.full:
......@@ -412,10 +419,11 @@ def gen_filename(options, ext=None, now=None):
# files, from the time of the most recent full backup preceding
# options.date, up to options.date.
import re
is_data_file = re.compile(r'\d{4}(?:-\d\d){5}\.(?:delta)?fsz?$').match
del re
def find_files(options):
when = options.date
if not when:
......@@ -455,10 +463,11 @@ def find_files(options):
#
# None, None, None, None
def scandat(repofiles):
fullfile = repofiles[0]
datfile = os.path.splitext(fullfile)[0] + '.dat'
fn = startpos = endpos = sum = None # assume .dat file missing or empty
fn = startpos = endpos = sum = None # assume .dat file missing or empty
try:
fp = open(datfile)
except IOError as e:
......@@ -475,6 +484,7 @@ def scandat(repofiles):
return fn, startpos, endpos, sum
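# Editor's illustrative sketch (not part of this diff): each line of a repozo
# .dat file records "chunk-path start-offset end-offset md5"; scandat() returns
# the fields of the last line, or four Nones when the file is missing or empty.
# The backup path below is made up.
fn, startpos, endpos, sum = scandat(['/backup/2010-05-14-02-03-04.fs'])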
def delete_old_backups(options):
# Delete all full backup files except for the most recent full backup file
all = sorted(filter(is_data_file, os.listdir(options.repository)))
......@@ -515,6 +525,7 @@ def delete_old_backups(options):
pass
os.unlink(os.path.join(options.repository, fname))
def do_full_backup(options):
options.full = True
tnow = gen_filedate(options)
......@@ -714,7 +725,8 @@ def do_recover(options):
"%s has checksum %s instead of %s" % (
repofile, reposum, expected_truth['sum']))
totalsz += reposz
log("Recovered chunk %s : %s bytes, md5: %s", repofile, reposz, reposum)
log("Recovered chunk %s : %s bytes, md5: %s",
repofile, reposz, reposum)
log("Recovered a total of %s bytes", totalsz)
else:
reposz, reposum = concat(repofiles, outfp)
......@@ -725,7 +737,8 @@ def do_recover(options):
source_index = '%s.index' % last_base
target_index = '%s.index' % options.output
if os.path.exists(source_index):
log('Restoring index file %s to %s', source_index, target_index)
log('Restoring index file %s to %s',
source_index, target_index)
shutil.copyfile(source_index, target_index)
else:
log('No index file to restore: %s', source_index)
......@@ -737,8 +750,8 @@ def do_recover(options):
try:
os.rename(temporary_output_file, options.output)
except OSError:
log("ZODB has been fully recovered as %s, but it cannot be renamed into : %s",
temporary_output_file, options.output)
log("ZODB has been fully recovered as %s, but it cannot be renamed"
" into : %s", temporary_output_file, options.output)
raise
......@@ -759,10 +772,12 @@ def do_verify(options):
log("Verifying %s", filename)
try:
if filename.endswith('fsz'):
actual_sum, size = get_checksum_and_size_of_gzipped_file(filename, options.quick)
actual_sum, size = get_checksum_and_size_of_gzipped_file(
filename, options.quick)
when_uncompressed = ' (when uncompressed)'
else:
actual_sum, size = get_checksum_and_size_of_file(filename, options.quick)
actual_sum, size = get_checksum_and_size_of_file(
filename, options.quick)
when_uncompressed = ''
except IOError:
error("%s is missing", filename)
......
......@@ -12,6 +12,7 @@ from ZODB.FileStorage import FileStorage
from ZODB.utils import U64, get_pickle_metadata, load_current
import six
def run(path, v=0):
fs = FileStorage(path, read_only=1)
# break into the file implementation
......@@ -31,12 +32,13 @@ def run(path, v=0):
if v:
print("%8s %5d %s" % (U64(oid), len(data), key))
L = totals.items()
L.sort(lambda a, b: cmp(a[1], b[1]))
L.sort(key=lambda x: x[1])
L.reverse()
print("Totals per object class:")
for key, (bytes, count) in L:
print("%8d %8d %s" % (count, bytes, key))
def main():
import sys
import getopt
......@@ -56,5 +58,6 @@ def main():
path = args[0]
run(path, v)
if __name__ == "__main__":
main()
......@@ -38,6 +38,7 @@ checker = zope.testing.renormalizing.RENormalizing([
"length=<LENGTH> offset=4 (+48)"),
])
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite(
......@@ -45,4 +46,4 @@ def test_suite():
'fstail.txt',
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
checker=checker),
))
))
......@@ -57,6 +57,3 @@ class FsdumpFsstatsTests(TestCase):
with open("stdout") as f:
self.assertEqual(f.readline().strip(),
"Summary: 1 txns, 1 objects, 1 revisions")
......@@ -19,6 +19,7 @@ import ZODB
from zope.testing import setupstack
from zope.testing.renormalizing import RENormalizing
def test_fstest_verbose():
r"""
>>> db = ZODB.DB('data.fs')
......@@ -52,4 +53,3 @@ def test_suite():
doctest.DocTestSuite(setUp=setupstack.setUpDirectory,
tearDown=setupstack.tearDown),
])
......@@ -28,11 +28,13 @@ else:
_NOISY = os.environ.get('NOISY_REPOZO_TEST_OUTPUT')
def _write_file(name, bits, mode='wb'):
with open(name, mode) as f:
f.write(bits)
f.flush()
def _read_file(name, mode='rb'):
with open(name, mode) as f:
return f.read()
......@@ -313,7 +315,6 @@ class Test_checksum(unittest.TestCase, FileopsBase):
self.assertEqual(sum, md5(b''.join(self._makeChunks())).hexdigest())
def test_nonempty_read_count(self):
chunks = []
file = self._makeFile()
sum = self._callFUT(file, 42)
self.assertEqual(sum, md5(b'x' * 42).hexdigest())
......@@ -335,13 +336,16 @@ class OptionsTestBase(object):
def _makeOptions(self, **kw):
import tempfile
self._repository_directory = tempfile.mkdtemp(prefix='test-repozo-')
class Options(object):
repository = self._repository_directory
date = None
def __init__(self, **kw):
self.__dict__.update(kw)
return Options(**kw)
class Test_copyfile(OptionsTestBase, unittest.TestCase):
def _callFUT(self, options, dest, start, n):
......@@ -413,10 +417,13 @@ class Test_concat(OptionsTestBase, unittest.TestCase):
class Faux(object):
_closed = False
def __init__(self):
self._written = []
def write(self, data):
self._written.append(data)
def close(self):
self._closed = True
......@@ -426,7 +433,10 @@ class Test_concat(OptionsTestBase, unittest.TestCase):
self.assertEqual(ofp._written, [x.encode() for x in 'ABC'])
self.assertFalse(ofp._closed)
_marker = object()
class Test_gen_filename(OptionsTestBase, unittest.TestCase):
def _callFUT(self, options, ext=_marker):
......@@ -436,39 +446,39 @@ class Test_gen_filename(OptionsTestBase, unittest.TestCase):
return gen_filename(options, ext)
def test_explicit_ext(self):
options = self._makeOptions(test_now = (2010, 5, 14, 12, 52, 31))
options = self._makeOptions(test_now=(2010, 5, 14, 12, 52, 31))
fn = self._callFUT(options, '.txt')
self.assertEqual(fn, '2010-05-14-12-52-31.txt')
def test_full_no_gzip(self):
options = self._makeOptions(test_now = (2010, 5, 14, 12, 52, 31),
full = True,
gzip = False,
)
options = self._makeOptions(test_now=(2010, 5, 14, 12, 52, 31),
full=True,
gzip=False,
)
fn = self._callFUT(options)
self.assertEqual(fn, '2010-05-14-12-52-31.fs')
def test_full_w_gzip(self):
options = self._makeOptions(test_now = (2010, 5, 14, 12, 52, 31),
full = True,
gzip = True,
)
options = self._makeOptions(test_now=(2010, 5, 14, 12, 52, 31),
full=True,
gzip=True,
)
fn = self._callFUT(options)
self.assertEqual(fn, '2010-05-14-12-52-31.fsz')
def test_incr_no_gzip(self):
options = self._makeOptions(test_now = (2010, 5, 14, 12, 52, 31),
full = False,
gzip = False,
)
options = self._makeOptions(test_now=(2010, 5, 14, 12, 52, 31),
full=False,
gzip=False,
)
fn = self._callFUT(options)
self.assertEqual(fn, '2010-05-14-12-52-31.deltafs')
def test_incr_w_gzip(self):
options = self._makeOptions(test_now = (2010, 5, 14, 12, 52, 31),
full = False,
gzip = True,
)
options = self._makeOptions(test_now=(2010, 5, 14, 12, 52, 31),
full=False,
gzip=True,
)
fn = self._callFUT(options)
self.assertEqual(fn, '2010-05-14-12-52-31.deltafsz')
......@@ -503,7 +513,7 @@ class Test_find_files(OptionsTestBase, unittest.TestCase):
(12, 13, 14, '.dat'),
(13, 14, 15, '.deltafs'),
(14, 15, 16, '.deltafs'),
]:
]:
files.append(self._makeFile(h, m, s, e))
found = self._callFUT(options)
# Older files, .dat file not included
......@@ -522,7 +532,7 @@ class Test_find_files(OptionsTestBase, unittest.TestCase):
(12, 13, 14, '.dat'),
(13, 14, 15, '.deltafs'),
(14, 15, 16, '.deltafs'),
]:
]:
files.append(self._makeFile(h, m, s, e))
found = self._callFUT(options)
# Older files, .dat file not included
......@@ -536,7 +546,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
return scandat(repofiles)
def test_no_dat_file(self):
options = self._makeOptions()
self._makeOptions()
fsfile = os.path.join(self._repository_directory, 'foo.fs')
fn, startpos, endpos, sum = self._callFUT([fsfile])
self.assertEqual(fn, None)
......@@ -545,7 +555,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
self.assertEqual(sum, None)
def test_empty_dat_file(self):
options = self._makeOptions()
self._makeOptions()
fsfile = os.path.join(self._repository_directory, 'foo.fs')
datfile = os.path.join(self._repository_directory, 'foo.dat')
_write_file(datfile, b'')
......@@ -556,7 +566,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
self.assertEqual(sum, None)
def test_single_line(self):
options = self._makeOptions()
self._makeOptions()
fsfile = os.path.join(self._repository_directory, 'foo.fs')
datfile = os.path.join(self._repository_directory, 'foo.dat')
_write_file(datfile, b'foo.fs 0 123 ABC\n')
......@@ -567,7 +577,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
self.assertEqual(sum, 'ABC')
def test_multiple_lines(self):
options = self._makeOptions()
self._makeOptions()
fsfile = os.path.join(self._repository_directory, 'foo.fs')
datfile = os.path.join(self._repository_directory, 'foo.dat')
_write_file(datfile, b'foo.fs 0 123 ABC\n'
......@@ -611,7 +621,7 @@ class Test_delete_old_backups(OptionsTestBase, unittest.TestCase):
FILENAMES = ['2009-12-20-10-08-03.fs',
'2009-12-20-10-08-03.dat',
'2009-12-20-10-08-03.index',
]
]
self._callFUT(filenames=FILENAMES)
remaining = os.listdir(self._repository_directory)
self.assertEqual(len(remaining), len(FILENAMES))
......@@ -623,16 +633,16 @@ class Test_delete_old_backups(OptionsTestBase, unittest.TestCase):
OLDER_FULL = ['2009-12-20-00-01-03.fs',
'2009-12-20-00-01-03.dat',
'2009-12-20-00-01-03.index',
]
]
DELTAS = ['2009-12-21-00-00-01.deltafs',
'2009-12-21-00-00-01.index',
'2009-12-22-00-00-01.deltafs',
'2009-12-22-00-00-01.index',
]
]
CURRENT_FULL = ['2009-12-23-00-00-01.fs',
'2009-12-23-00-00-01.dat',
'2009-12-23-00-00-01.index',
]
]
FILENAMES = OLDER_FULL + DELTAS + CURRENT_FULL
self._callFUT(filenames=FILENAMES)
remaining = os.listdir(self._repository_directory)
......@@ -651,16 +661,16 @@ class Test_delete_old_backups(OptionsTestBase, unittest.TestCase):
OLDER_FULL = ['2009-12-20-00-01-03.fsz',
'2009-12-20-00-01-03.dat',
'2009-12-20-00-01-03.index',
]
]
DELTAS = ['2009-12-21-00-00-01.deltafsz',
'2009-12-21-00-00-01.index',
'2009-12-22-00-00-01.deltafsz',
'2009-12-22-00-00-01.index',
]
]
CURRENT_FULL = ['2009-12-23-00-00-01.fsz',
'2009-12-23-00-00-01.dat',
'2009-12-23-00-00-01.index',
]
]
FILENAMES = OLDER_FULL + DELTAS + CURRENT_FULL
self._callFUT(filenames=FILENAMES)
remaining = os.listdir(self._repository_directory)
......@@ -684,7 +694,7 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
def _makeDB(self):
import tempfile
datadir = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
return OurDB(self._data_directory)
def test_dont_overwrite_existing_file(self):
......@@ -694,8 +704,8 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(full=True,
file=db._file_name,
gzip=False,
test_now = (2010, 5, 14, 10, 51, 22),
)
test_now=(2010, 5, 14, 10, 51, 22),
)
fqn = os.path.join(self._repository_directory, gen_filename(options))
_write_file(fqn, b'TESTING')
self.assertRaises(WouldOverwriteFiles, self._callFUT, options)
......@@ -708,8 +718,8 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(file=db._file_name,
gzip=False,
killold=False,
test_now = (2010, 5, 14, 10, 51, 22),
)
test_now=(2010, 5, 14, 10, 51, 22),
)
self._callFUT(options)
target = os.path.join(self._repository_directory,
gen_filename(options))
......@@ -717,9 +727,9 @@ class Test_do_full_backup(OptionsTestBase, unittest.TestCase):
self.assertEqual(_read_file(target), original)
datfile = os.path.join(self._repository_directory,
gen_filename(options, '.dat'))
self.assertEqual(_read_file(datfile, mode='r'), #XXX 'rb'?
self.assertEqual(_read_file(datfile, mode='r'), # XXX 'rb'?
'%s 0 %d %s\n' %
(target, len(original), md5(original).hexdigest()))
(target, len(original), md5(original).hexdigest()))
ndxfile = os.path.join(self._repository_directory,
gen_filename(options, '.index'))
ndx_info = fsIndex.load(ndxfile)
......@@ -739,7 +749,7 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
def _makeDB(self):
import tempfile
datadir = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
return OurDB(self._data_directory)
def test_dont_overwrite_existing_file(self):
......@@ -750,9 +760,9 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(full=False,
file=db._file_name,
gzip=False,
test_now = (2010, 5, 14, 10, 51, 22),
date = None,
)
test_now=(2010, 5, 14, 10, 51, 22),
date=None,
)
fqn = os.path.join(self._repository_directory, gen_filename(options))
_write_file(fqn, b'TESTING')
repofiles = find_files(options)
......@@ -768,24 +778,23 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(file=db._file_name,
gzip=False,
killold=False,
test_now = (2010, 5, 14, 10, 51, 22),
date = None,
)
test_now=(2010, 5, 14, 10, 51, 22),
date=None,
)
fullfile = os.path.join(self._repository_directory,
'2010-05-14-00-00-00.fs')
original = _read_file(db._file_name)
last = len(original)
_write_file(fullfile, original)
datfile = os.path.join(self._repository_directory,
'2010-05-14-00-00-00.dat')
'2010-05-14-00-00-00.dat')
repofiles = [fullfile, datfile]
self._callFUT(options, oldpos, repofiles)
target = os.path.join(self._repository_directory,
gen_filename(options))
self.assertEqual(_read_file(target), b'')
self.assertEqual(_read_file(datfile, mode='r'), #XXX mode='rb'?
self.assertEqual(_read_file(datfile, mode='r'), # XXX mode='rb'?
'%s %d %d %s\n' %
(target, oldpos, oldpos, md5(b'').hexdigest()))
(target, oldpos, oldpos, md5(b'').hexdigest()))
ndxfile = os.path.join(self._repository_directory,
gen_filename(options, '.index'))
ndx_info = fsIndex.load(ndxfile)
......@@ -805,15 +814,15 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(file=db._file_name,
gzip=False,
killold=False,
test_now = (2010, 5, 14, 10, 51, 22),
date = None,
)
test_now=(2010, 5, 14, 10, 51, 22),
date=None,
)
fullfile = os.path.join(self._repository_directory,
'2010-05-14-00-00-00.fs')
original = _read_file(db._file_name)
f = _write_file(fullfile, original)
datfile = os.path.join(self._repository_directory,
'2010-05-14-00-00-00.dat')
'2010-05-14-00-00-00.dat')
repofiles = [fullfile, datfile]
db.mutate()
newpos = db.pos
......@@ -824,9 +833,9 @@ class Test_do_incremental_backup(OptionsTestBase, unittest.TestCase):
f.seek(oldpos)
increment = f.read()
self.assertEqual(_read_file(target), increment)
self.assertEqual(_read_file(datfile, mode='r'), #XXX mode='rb'?
self.assertEqual(_read_file(datfile, mode='r'), # XXX mode='rb'?
'%s %d %d %s\n' %
(target, oldpos, newpos,
(target, oldpos, newpos,
md5(increment).hexdigest()))
ndxfile = os.path.join(self._repository_directory,
gen_filename(options, '.index'))
......@@ -850,7 +859,7 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
if text is None:
text = name
fqn = os.path.join(self._repository_directory, name)
f = _write_file(fqn, text.encode())
_write_file(fqn, text.encode())
return fqn
def test_no_files(self):
......@@ -872,7 +881,7 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
(12, 13, 14, '.dat'),
(13, 14, 15, '.deltafs'),
(14, 15, 16, '.deltafs'),
]:
]:
files.append(self._makeFile(h, m, s, e))
self.assertRaises(NoFiles, self._callFUT, options)
......@@ -880,7 +889,6 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
import tempfile
dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output,
withverify=False)
......@@ -908,7 +916,6 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
import tempfile
dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output,
withverify=False)
......@@ -936,15 +943,15 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
import tempfile
dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output,
withverify=True)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self._callFUT(options)
self.assertFalse(os.path.exists(output + '.part'))
self.assertEqual(_read_file(output), b'AAABBBB')
......@@ -954,15 +961,15 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
from ZODB.scripts.repozo import VerificationFail
dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output,
withverify=True)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec61\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec61\n') # noqa: E501 line too long
self.assertRaises(VerificationFail, self._callFUT, options)
self.assertTrue(os.path.exists(output + '.part'))
......@@ -971,15 +978,15 @@ class Test_do_recover(OptionsTestBase, unittest.TestCase):
from ZODB.scripts.repozo import VerificationFail
dd = self._data_directory = tempfile.mkdtemp(prefix='zodb-test-')
output = os.path.join(dd, 'Data.fs')
index = os.path.join(dd, 'Data.fs.index')
options = self._makeOptions(date='2010-05-15-13-30-57',
output=output,
withverify=True)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafs 3 8 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 8 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertRaises(VerificationFail, self._callFUT, options)
self.assertTrue(os.path.exists(output + '.part'))
......@@ -990,6 +997,7 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
from ZODB.scripts import repozo
errors = []
orig_error = repozo.error
def _error(msg, *args):
errors.append(msg % args)
repozo.error = _error
......@@ -1024,26 +1032,29 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(quick=False)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options), [])
def test_all_is_fine_gzip(self):
options = self._makeOptions(quick=False)
self._makeFile(2, 3, 4, '.fsz', 'AAA')
self._makeFile(4, 5, 6, '.deltafsz', 'BBBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options), [])
def test_missing_file(self):
options = self._makeOptions(quick=True)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafs is missing'])
......@@ -1051,9 +1062,10 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
def test_missing_file_gzip(self):
options = self._makeOptions(quick=True)
self._makeFile(2, 3, 4, '.fsz', 'AAA')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafsz is missing'])
......@@ -1062,9 +1074,10 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(quick=False)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafs is 3 bytes,'
......@@ -1074,21 +1087,24 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(quick=False)
self._makeFile(2, 3, 4, '.fsz', 'AAA')
self._makeFile(4, 5, 6, '.deltafsz', 'BBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self.assertEqual(self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafsz is 3 bytes (when uncompressed),'
' should be 4 bytes'])
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(
self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafsz is 3 bytes (when uncompressed),'
' should be 4 bytes'])
def test_bad_checksum(self):
options = self._makeOptions(quick=False)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BbBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafs has checksum'
......@@ -1099,31 +1115,35 @@ class Test_do_verify(OptionsTestBase, unittest.TestCase):
options = self._makeOptions(quick=False)
self._makeFile(2, 3, 4, '.fsz', 'AAA')
self._makeFile(4, 5, 6, '.deltafsz', 'BbBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n'
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n')
self.assertEqual(self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafsz has checksum'
' 36486440db255f0ee6ab109d5d231406 (when uncompressed) instead of'
' f50881ced34c7d9e6bce100bf33dec60'])
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 e1faffb3e614e6c2fba74296962386b7\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafsz 3 7 f50881ced34c7d9e6bce100bf33dec60\n') # noqa: E501 line too long
self.assertEqual(
self._callFUT(options),
[options.repository + os.path.sep +
'2010-05-14-04-05-06.deltafsz has checksum'
' 36486440db255f0ee6ab109d5d231406 (when uncompressed) instead of'
' f50881ced34c7d9e6bce100bf33dec60'])
def test_quick_ignores_checksums(self):
options = self._makeOptions(quick=True)
self._makeFile(2, 3, 4, '.fs', 'AAA')
self._makeFile(4, 5, 6, '.deltafs', 'BBBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n'
'/backup/2010-05-14-04-05-06.deltafs 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fs 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafs 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options), [])
def test_quick_ignores_checksums_gzip(self):
options = self._makeOptions(quick=True)
self._makeFile(2, 3, 4, '.fsz', 'AAA')
self._makeFile(4, 5, 6, '.deltafsz', 'BBBB')
self._makeFile(2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n'
'/backup/2010-05-14-04-05-06.deltafsz 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n')
self._makeFile(
2, 3, 4, '.dat',
'/backup/2010-05-14-02-03-04.fsz 0 3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n' # noqa: E501 line too long
'/backup/2010-05-14-04-05-06.deltafsz 3 7 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n') # noqa: E501 line too long
self.assertEqual(self._callFUT(options), [])
......@@ -1175,7 +1195,6 @@ class MonteCarloTests(unittest.TestCase):
import random
from shutil import copyfile
from time import gmtime
from time import sleep
self.db.mutate()
# Pack about each tenth time.
......@@ -1207,7 +1226,8 @@ class MonteCarloTests(unittest.TestCase):
self.assertRestored()
def assertRestored(self, correctpath='Data.fs', when=None):
# Do recovery to time 'when', and check that it's identical to correctpath.
# Do recovery to time 'when', and check that it's identical to
# correctpath.
# restore to Restored.fs
restoredfile = os.path.join(self.restoredir, 'Restored.fs')
argv = ['-Rr', self.backupdir, '-o', restoredfile]
......@@ -1222,7 +1242,7 @@ class MonteCarloTests(unittest.TestCase):
fguts = _read_file(correctpath)
gguts = _read_file(restoredfile)
msg = ("guts don't match\ncorrectpath=%r when=%r\n cmd=%r" %
(correctpath, when, ' '.join(argv)))
(correctpath, when, ' '.join(argv)))
self.assertEqual(fguts, gguts, msg)
......@@ -1239,7 +1259,7 @@ def test_suite():
unittest.makeSuite(Test_delete_old_backups),
unittest.makeSuite(Test_do_full_backup),
unittest.makeSuite(Test_do_incremental_backup),
#unittest.makeSuite(Test_do_backup), #TODO
# unittest.makeSuite(Test_do_backup), #TODO
unittest.makeSuite(Test_do_recover),
unittest.makeSuite(Test_do_verify),
# N.B.: this test take forever to run (~40sec on a fast laptop),
......
......@@ -123,6 +123,7 @@ import threading
import time
import transaction
class JobProducer(object):
def __init__(self):
......@@ -142,7 +143,6 @@ class JobProducer(object):
return not not self.jobs
class MBox(object):
def __init__(self, filename):
......@@ -199,8 +199,11 @@ class MBox(object):
message.mbox = self.__name__
return message
bins = 9973
#bins = 11
# bins = 11
def mailfolder(app, mboxname, number):
mail = getattr(app, mboxname, None)
if mail is None:
......@@ -210,7 +213,7 @@ def mailfolder(app, mboxname, number):
mail.length = Length()
for i in range(bins):
mail.manage_addFolder('b'+str(i))
bin = hash(str(number))%bins
bin = hash(str(number)) % bins
return getattr(mail, 'b'+str(bin))
......@@ -219,24 +222,25 @@ def VmSize():
try:
with open('/proc/%s/status' % os.getpid()) as f:
lines = f.readlines()
except:
except: # noqa: E722 do not use bare 'except'
return 0
else:
l = list(filter(lambda l: l[:7] == 'VmSize:', lines))
if l:
l = l[0][7:].strip().split()[0]
return int(l)
l_ = list(filter(lambda l: l[:7] == 'VmSize:', lines))
if l_:
l_ = l_[0][7:].strip().split()[0]
return int(l_)
return 0
def setup(lib_python):
try:
os.remove(os.path.join(lib_python, '..', '..', 'var', 'Data.fs'))
except:
except: # noqa: E722 do not use bare 'except'
pass
import Zope2
import Products
import AccessControl.SecurityManagement
app=Zope2.app()
app = Zope2.app()
Products.ZCatalog.ZCatalog.manage_addZCatalog(app, 'cat', '')
......@@ -261,6 +265,7 @@ def setup(lib_python):
app._p_jar.close()
def do(db, f, args):
"""Do something in a transaction, retrying of necessary
......@@ -275,8 +280,8 @@ def do(db, f, args):
connection = db.open()
try:
transaction.begin()
t=time.time()
c=time.clock()
t = time.time()
c = time.clock()
try:
try:
r = f(connection, *args)
......@@ -288,8 +293,8 @@ def do(db, f, args):
wcomp += time.time() - t
ccomp += time.clock() - c
t=time.time()
c=time.clock()
t = time.time()
c = time.clock()
try:
try:
transaction.commit()
......@@ -306,6 +311,7 @@ def do(db, f, args):
return start, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit, r
def run1(tid, db, factory, job, args):
(start, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit, r
) = do(db, job, args)
......@@ -314,6 +320,7 @@ def run1(tid, db, factory, job, args):
start, tid, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit,
factory.__name__, r))
def run(jobs, tid=b''):
import Zope2
while 1:
......@@ -321,7 +328,7 @@ def run(jobs, tid=b''):
run1(tid, Zope2.DB, factory, job, args)
if repeatp:
while 1:
i = random.randint(0,100)
i = random.randint(0, 100)
if i > repeatp:
break
run1(tid, Zope2.DB, factory, job, args)
......@@ -350,27 +357,28 @@ def index(connection, messages, catalog, max):
doc = mail[docid]
for h in message.headers:
h = h.strip()
l = h.find(':')
if l <= 0:
l_ = h.find(':')
if l_ <= 0:
continue
name = h[:l].lower()
if name=='subject':
name='title'
v = h[l+1:].strip()
type='string'
name = h[:l_].lower()
if name == 'subject':
name = 'title'
v = h[l_ + 1:].strip()
type = 'string'
if name=='title':
if name == 'title':
doc.manage_changeProperties(title=h)
else:
try:
doc.manage_addProperty(name, v, type)
except:
except: # noqa: E722 do not use bare 'except'
pass
if catalog:
app.cat.catalog_object(doc)
return message.number
class IndexJob(object):
needs_mbox = 1
catalog = 1
......@@ -389,8 +397,11 @@ class InsertJob(IndexJob):
catalog = 0
prefix = 'insert'
wordre = re.compile(r'(\w{3,20})')
stop = 'and', 'not'
def edit(connection, mbox, catalog=1):
app = connection.root()['Application']
mail = getattr(app, mbox.__name__, None)
......@@ -423,7 +434,7 @@ def edit(connection, mbox, catalog=1):
nins = 10
for j in range(ndel):
j = random.randint(0,len(text)-1)
j = random.randint(0, len(text)-1)
word = text[j]
m = wordre.search(word)
if m:
......@@ -444,6 +455,7 @@ def edit(connection, mbox, catalog=1):
return norig, ndel, nins
class EditJob(object):
needs_mbox = 1
prefix = 'edit'
......@@ -456,6 +468,7 @@ class EditJob(object):
def create(self):
return edit, (self.mbox, self.catalog)
class ModifyJob(EditJob):
prefix = 'modify'
catalog = 0
......@@ -480,6 +493,7 @@ def search(connection, terms, number):
return n
class SearchJob(object):
def __init__(self, terms='', number=10):
......@@ -499,189 +513,189 @@ class SearchJob(object):
return search, (self.terms, self.number)
words = ['banishment', 'indirectly', 'imprecise', 'peeks',
'opportunely', 'bribe', 'sufficiently', 'Occidentalized', 'elapsing',
'fermenting', 'listen', 'orphanage', 'younger', 'draperies', 'Ida',
'cuttlefish', 'mastermind', 'Michaels', 'populations', 'lent',
'cater', 'attentional', 'hastiness', 'dragnet', 'mangling',
'scabbards', 'princely', 'star', 'repeat', 'deviation', 'agers',
'fix', 'digital', 'ambitious', 'transit', 'jeeps', 'lighted',
'Prussianizations', 'Kickapoo', 'virtual', 'Andrew', 'generally',
'boatsman', 'amounts', 'promulgation', 'Malay', 'savaging',
'courtesan', 'nursed', 'hungered', 'shiningly', 'ship', 'presides',
'Parke', 'moderns', 'Jonas', 'unenlightening', 'dearth', 'deer',
'domesticates', 'recognize', 'gong', 'penetrating', 'dependents',
'unusually', 'complications', 'Dennis', 'imbalances', 'nightgown',
'attached', 'testaments', 'congresswoman', 'circuits', 'bumpers',
'braver', 'Boreas', 'hauled', 'Howe', 'seethed', 'cult', 'numismatic',
'vitality', 'differences', 'collapsed', 'Sandburg', 'inches', 'head',
'rhythmic', 'opponent', 'blanketer', 'attorneys', 'hen', 'spies',
'indispensably', 'clinical', 'redirection', 'submit', 'catalysts',
'councilwoman', 'kills', 'topologies', 'noxious', 'exactions',
'dashers', 'balanced', 'slider', 'cancerous', 'bathtubs', 'legged',
'respectably', 'crochets', 'absenteeism', 'arcsine', 'facility',
'cleaners', 'bobwhite', 'Hawkins', 'stockade', 'provisional',
'tenants', 'forearms', 'Knowlton', 'commit', 'scornful',
'pediatrician', 'greets', 'clenches', 'trowels', 'accepts',
'Carboloy', 'Glenn', 'Leigh', 'enroll', 'Madison', 'Macon', 'oiling',
'entertainingly', 'super', 'propositional', 'pliers', 'beneficiary',
'hospitable', 'emigration', 'sift', 'sensor', 'reserved',
'colonization', 'shrilled', 'momentously', 'stevedore', 'Shanghaiing',
'schoolmasters', 'shaken', 'biology', 'inclination', 'immoderate',
'stem', 'allegory', 'economical', 'daytime', 'Newell', 'Moscow',
'archeology', 'ported', 'scandals', 'Blackfoot', 'leery', 'kilobit',
'empire', 'obliviousness', 'productions', 'sacrificed', 'ideals',
'enrolling', 'certainties', 'Capsicum', 'Brookdale', 'Markism',
'unkind', 'dyers', 'legislates', 'grotesquely', 'megawords',
'arbitrary', 'laughing', 'wildcats', 'thrower', 'sex', 'devils',
'Wehr', 'ablates', 'consume', 'gossips', 'doorways', 'Shari',
'advanced', 'enumerable', 'existentially', 'stunt', 'auctioneers',
'scheduler', 'blanching', 'petulance', 'perceptibly', 'vapors',
'progressed', 'rains', 'intercom', 'emergency', 'increased',
'fluctuating', 'Krishna', 'silken', 'reformed', 'transformation',
'easter', 'fares', 'comprehensible', 'trespasses', 'hallmark',
'tormenter', 'breastworks', 'brassiere', 'bladders', 'civet', 'death',
'transformer', 'tolerably', 'bugle', 'clergy', 'mantels', 'satin',
'Boswellizes', 'Bloomington', 'notifier', 'Filippo', 'circling',
'unassigned', 'dumbness', 'sentries', 'representativeness', 'souped',
'Klux', 'Kingstown', 'gerund', 'Russell', 'splices', 'bellow',
'bandies', 'beefers', 'cameramen', 'appalled', 'Ionian', 'butterball',
'Portland', 'pleaded', 'admiringly', 'pricks', 'hearty', 'corer',
'deliverable', 'accountably', 'mentors', 'accorded',
'acknowledgement', 'Lawrenceville', 'morphology', 'eucalyptus',
'Rena', 'enchanting', 'tighter', 'scholars', 'graduations', 'edges',
'Latinization', 'proficiency', 'monolithic', 'parenthesizing', 'defy',
'shames', 'enjoyment', 'Purdue', 'disagrees', 'barefoot', 'maims',
'flabbergast', 'dishonorable', 'interpolation', 'fanatics', 'dickens',
'abysses', 'adverse', 'components', 'bowl', 'belong', 'Pipestone',
'trainees', 'paw', 'pigtail', 'feed', 'whore', 'conditioner',
'Volstead', 'voices', 'strain', 'inhabits', 'Edwin', 'discourses',
'deigns', 'cruiser', 'biconvex', 'biking', 'depreciation', 'Harrison',
'Persian', 'stunning', 'agar', 'rope', 'wagoner', 'elections',
'reticulately', 'Cruz', 'pulpits', 'wilt', 'peels', 'plants',
'administerings', 'deepen', 'rubs', 'hence', 'dissension', 'implored',
'bereavement', 'abyss', 'Pennsylvania', 'benevolent', 'corresponding',
'Poseidon', 'inactive', 'butchers', 'Mach', 'woke', 'loading',
'utilizing', 'Hoosier', 'undo', 'Semitization', 'trigger', 'Mouthe',
'mark', 'disgracefully', 'copier', 'futility', 'gondola', 'algebraic',
'lecturers', 'sponged', 'instigators', 'looted', 'ether', 'trust',
'feeblest', 'sequencer', 'disjointness', 'congresses', 'Vicksburg',
'incompatibilities', 'commend', 'Luxembourg', 'reticulation',
'instructively', 'reconstructs', 'bricks', 'attache', 'Englishman',
'provocation', 'roughen', 'cynic', 'plugged', 'scrawls', 'antipode',
'injected', 'Daedalus', 'Burnsides', 'asker', 'confronter',
'merriment', 'disdain', 'thicket', 'stinker', 'great', 'tiers',
'oust', 'antipodes', 'Macintosh', 'tented', 'packages',
'Mediterraneanize', 'hurts', 'orthodontist', 'seeder', 'readying',
'babying', 'Florida', 'Sri', 'buckets', 'complementary',
'cartographer', 'chateaus', 'shaves', 'thinkable', 'Tehran',
'Gordian', 'Angles', 'arguable', 'bureau', 'smallest', 'fans',
'navigated', 'dipole', 'bootleg', 'distinctive', 'minimization',
'absorbed', 'surmised', 'Malawi', 'absorbent', 'close', 'conciseness',
'hopefully', 'declares', 'descent', 'trick', 'portend', 'unable',
'mildly', 'Morse', 'reference', 'scours', 'Caribbean', 'battlers',
'astringency', 'likelier', 'Byronizes', 'econometric', 'grad',
'steak', 'Austrian', 'ban', 'voting', 'Darlington', 'bison', 'Cetus',
'proclaim', 'Gilbertson', 'evictions', 'submittal', 'bearings',
'Gothicizer', 'settings', 'McMahon', 'densities', 'determinants',
'period', 'DeKastere', 'swindle', 'promptness', 'enablers', 'wordy',
'during', 'tables', 'responder', 'baffle', 'phosgene', 'muttering',
'limiters', 'custodian', 'prevented', 'Stouffer', 'waltz', 'Videotex',
'brainstorms', 'alcoholism', 'jab', 'shouldering', 'screening',
'explicitly', 'earner', 'commandment', 'French', 'scrutinizing',
'Gemma', 'capacitive', 'sheriff', 'herbivore', 'Betsey', 'Formosa',
'scorcher', 'font', 'damming', 'soldiers', 'flack', 'Marks',
'unlinking', 'serenely', 'rotating', 'converge', 'celebrities',
'unassailable', 'bawling', 'wording', 'silencing', 'scotch',
'coincided', 'masochists', 'graphs', 'pernicious', 'disease',
'depreciates', 'later', 'torus', 'interject', 'mutated', 'causer',
'messy', 'Bechtel', 'redundantly', 'profoundest', 'autopsy',
'philosophic', 'iterate', 'Poisson', 'horridly', 'silversmith',
'millennium', 'plunder', 'salmon', 'missioner', 'advances', 'provers',
'earthliness', 'manor', 'resurrectors', 'Dahl', 'canto', 'gangrene',
'gabler', 'ashore', 'frictionless', 'expansionism', 'emphasis',
'preservations', 'Duane', 'descend', 'isolated', 'firmware',
'dynamites', 'scrawled', 'cavemen', 'ponder', 'prosperity', 'squaw',
'vulnerable', 'opthalmic', 'Simms', 'unite', 'totallers', 'Waring',
'enforced', 'bridge', 'collecting', 'sublime', 'Moore', 'gobble',
'criticizes', 'daydreams', 'sedate', 'apples', 'Concordia',
'subsequence', 'distill', 'Allan', 'seizure', 'Isadore', 'Lancashire',
'spacings', 'corresponded', 'hobble', 'Boonton', 'genuineness',
'artifact', 'gratuities', 'interviewee', 'Vladimir', 'mailable',
'Bini', 'Kowalewski', 'interprets', 'bereave', 'evacuated', 'friend',
'tourists', 'crunched', 'soothsayer', 'fleetly', 'Romanizations',
'Medicaid', 'persevering', 'flimsy', 'doomsday', 'trillion',
'carcasses', 'guess', 'seersucker', 'ripping', 'affliction',
'wildest', 'spokes', 'sheaths', 'procreate', 'rusticates', 'Schapiro',
'thereafter', 'mistakenly', 'shelf', 'ruination', 'bushel',
'assuredly', 'corrupting', 'federation', 'portmanteau', 'wading',
'incendiary', 'thing', 'wanderers', 'messages', 'Paso', 'reexamined',
'freeings', 'denture', 'potting', 'disturber', 'laborer', 'comrade',
'intercommunicating', 'Pelham', 'reproach', 'Fenton', 'Alva', 'oasis',
'attending', 'cockpit', 'scout', 'Jude', 'gagging', 'jailed',
'crustaceans', 'dirt', 'exquisitely', 'Internet', 'blocker', 'smock',
'Troutman', 'neighboring', 'surprise', 'midscale', 'impart',
'badgering', 'fountain', 'Essen', 'societies', 'redresses',
'afterwards', 'puckering', 'silks', 'Blakey', 'sequel', 'greet',
'basements', 'Aubrey', 'helmsman', 'album', 'wheelers', 'easternmost',
'flock', 'ambassadors', 'astatine', 'supplant', 'gird', 'clockwork',
'foxes', 'rerouting', 'divisional', 'bends', 'spacer',
'physiologically', 'exquisite', 'concerts', 'unbridled', 'crossing',
'rock', 'leatherneck', 'Fortescue', 'reloading', 'Laramie', 'Tim',
'forlorn', 'revert', 'scarcer', 'spigot', 'equality', 'paranormal',
'aggrieves', 'pegs', 'committeewomen', 'documented', 'interrupt',
'emerald', 'Battelle', 'reconverted', 'anticipated', 'prejudices',
'drowsiness', 'trivialities', 'food', 'blackberries', 'Cyclades',
'tourist', 'branching', 'nugget', 'Asilomar', 'repairmen', 'Cowan',
'receptacles', 'nobler', 'Nebraskan', 'territorial', 'chickadee',
'bedbug', 'darted', 'vigilance', 'Octavia', 'summands', 'policemen',
'twirls', 'style', 'outlawing', 'specifiable', 'pang', 'Orpheus',
'epigram', 'Babel', 'butyrate', 'wishing', 'fiendish', 'accentuate',
'much', 'pulsed', 'adorned', 'arbiters', 'counted', 'Afrikaner',
'parameterizes', 'agenda', 'Americanism', 'referenda', 'derived',
'liquidity', 'trembling', 'lordly', 'Agway', 'Dillon', 'propellers',
'statement', 'stickiest', 'thankfully', 'autograph', 'parallel',
'impulse', 'Hamey', 'stylistic', 'disproved', 'inquirer', 'hoisting',
'residues', 'variant', 'colonials', 'dequeued', 'especial', 'Samoa',
'Polaris', 'dismisses', 'surpasses', 'prognosis', 'urinates',
'leaguers', 'ostriches', 'calculative', 'digested', 'divided',
'reconfigurer', 'Lakewood', 'illegalities', 'redundancy',
'approachability', 'masterly', 'cookery', 'crystallized', 'Dunham',
'exclaims', 'mainline', 'Australianizes', 'nationhood', 'pusher',
'ushers', 'paranoia', 'workstations', 'radiance', 'impedes',
'Minotaur', 'cataloging', 'bites', 'fashioning', 'Alsop', 'servants',
'Onondaga', 'paragraph', 'leadings', 'clients', 'Latrobe',
'Cornwallis', 'excitingly', 'calorimetric', 'savior', 'tandem',
'antibiotics', 'excuse', 'brushy', 'selfish', 'naive', 'becomes',
'towers', 'popularizes', 'engender', 'introducing', 'possession',
'slaughtered', 'marginally', 'Packards', 'parabola', 'utopia',
'automata', 'deterrent', 'chocolates', 'objectives', 'clannish',
'aspirin', 'ferociousness', 'primarily', 'armpit', 'handfuls',
'dangle', 'Manila', 'enlivened', 'decrease', 'phylum', 'hardy',
'objectively', 'baskets', 'chaired', 'Sepoy', 'deputy', 'blizzard',
'shootings', 'breathtaking', 'sticking', 'initials', 'epitomized',
'Forrest', 'cellular', 'amatory', 'radioed', 'horrified', 'Neva',
'simultaneous', 'delimiter', 'expulsion', 'Himmler', 'contradiction',
'Remus', 'Franklinizations', 'luggage', 'moisture', 'Jews',
'comptroller', 'brevity', 'contradictions', 'Ohio', 'active',
'babysit', 'China', 'youngest', 'superstition', 'clawing', 'raccoons',
'chose', 'shoreline', 'helmets', 'Jeffersonian', 'papered',
'kindergarten', 'reply', 'succinct', 'split', 'wriggle', 'suitcases',
'nonce', 'grinders', 'anthem', 'showcase', 'maimed', 'blue', 'obeys',
'unreported', 'perusing', 'recalculate', 'rancher', 'demonic',
'Lilliputianize', 'approximation', 'repents', 'yellowness',
'irritates', 'Ferber', 'flashlights', 'booty', 'Neanderthal',
'someday', 'foregoes', 'lingering', 'cloudiness', 'guy', 'consumer',
'Berkowitz', 'relics', 'interpolating', 'reappearing', 'advisements',
'Nolan', 'turrets', 'skeletal', 'skills', 'mammas', 'Winsett',
'wheelings', 'stiffen', 'monkeys', 'plainness', 'braziers', 'Leary',
'advisee', 'jack', 'verb', 'reinterpret', 'geometrical', 'trolleys',
'arboreal', 'overpowered', 'Cuzco', 'poetical', 'admirations',
'Hobbes', 'phonemes', 'Newsweek', 'agitator', 'finally', 'prophets',
'environment', 'easterners', 'precomputed', 'faults', 'rankly',
'swallowing', 'crawl', 'trolley', 'spreading', 'resourceful', 'go',
'demandingly', 'broader', 'spiders', 'Marsha', 'debris', 'operates',
'Dundee', 'alleles', 'crunchier', 'quizzical', 'hanging', 'Fisk']
wordsd = {}
for word in words:
......@@ -702,11 +716,11 @@ def collect_options(args, jobs, options):
collect_options(list(d['options']), jobs, options)
elif name in options:
v = args.pop(0)
if options[name] != None:
if options[name] is not None:
raise ValueError(
"Duplicate values for %s, %s and %s"
% (name, v, options[name])
)
)
options[name] = v
elif name == 'setup':
options['setup'] = 1
......@@ -720,8 +734,8 @@ def collect_options(args, jobs, options):
raise ValueError(
"Duplicate parameter %s for job %s"
% (name, job)
)
kw[name]=v
)
kw[name] = v
if 'frequency' in kw:
frequency = kw['frequency']
del kw['frequency']
......@@ -756,6 +770,7 @@ def find_lib_python():
return p
raise ValueError("Couldn't find lib/python")
def main(args=None):
lib_python = find_lib_python()
sys.path.insert(0, lib_python)
......@@ -767,7 +782,7 @@ def main(args=None):
sys.exit(0)
print(args)
random.seed(hash(tuple(args))) # always use the same for the given args
random.seed(hash(tuple(args))) # always use the same for the given args
options = {"mbox": None, "threads": None}
jobdefs = []
......@@ -830,6 +845,5 @@ def zetup(configfile_name):
dropPrivileges(opts.configroot)
if __name__ == '__main__':
main()
......@@ -304,7 +304,7 @@ class ObjectWriter(object):
oid = obj.oid
if oid is None:
target = obj() # get the referenced object
target = obj() # get the referenced object
oid = target._p_oid
if oid is None:
# Here we are causing the object to be saved in
......@@ -327,7 +327,6 @@ class ObjectWriter(object):
else:
return ['w', (oid, obj.database_name)]
# Since we have an oid, we have either a persistent instance
# (an instance of Persistent), or a persistent class.
......@@ -357,14 +356,14 @@ class ObjectWriter(object):
raise InvalidObjectReference(
"Attempt to store an object from a foreign "
"database connection", self._jar, obj,
)
)
if self._jar.get_connection(database_name) is not obj._p_jar:
raise InvalidObjectReference(
"Attempt to store a reference to an object from "
"a separate connection to the same database or "
"multidatabase", self._jar, obj,
)
)
# OK, we have an object from another database.
# Let's make sure the object was not *just* loaded.
......@@ -374,7 +373,7 @@ class ObjectWriter(object):
"A new object is reachable from multiple databases. "
"Won't try to guess which one was correct!",
self._jar, obj,
)
)
oid = binary(oid)
klass = type(obj)
......@@ -412,7 +411,7 @@ class ObjectWriter(object):
# revisit this in the future.
newargs = getattr(obj, "__getnewargs__", None)
if (isinstance(getattr(klass, '_p_oid', 0), _oidtypes)
and klass.__module__):
and klass.__module__):
# This is a persistent class with a non-empty module. This
# uses pickle format #3 or #7.
klass = klass.__module__, klass.__name__
......@@ -443,6 +442,7 @@ class ObjectWriter(object):
def __iter__(self):
return NewObjectIterator(self._stack)
class NewObjectIterator(object):
# The pickler is used as a forward iterator when the connection
......@@ -463,6 +463,7 @@ class NewObjectIterator(object):
next = __next__
class ObjectReader(object):
def __init__(self, conn=None, cache=None, factory=None):
......@@ -481,7 +482,8 @@ class ObjectReader(object):
def find_global(modulename, name):
return factory(conn, modulename, name)
unpickler = PersistentUnpickler(find_global, self._persistent_load, file)
unpickler = PersistentUnpickler(
find_global, self._persistent_load, file)
return unpickler
......@@ -542,7 +544,6 @@ class ObjectReader(object):
loaders['m'] = load_multi_persistent
def load_persistent_weakref(self, oid, database_name=None):
if not isinstance(oid, bytes):
assert isinstance(oid, str)
......@@ -622,9 +623,9 @@ class ObjectReader(object):
def getState(self, pickle):
unpickler = self._get_unpickler(pickle)
try:
unpickler.load() # skip the class metadata
unpickler.load() # skip the class metadata
return unpickler.load()
except EOFError as msg:
except EOFError:
log = logging.getLogger("ZODB.serialize")
log.exception("Unpickling error: %r", pickle)
raise
......@@ -673,9 +674,11 @@ def referencesf(p, oids=None):
return oids
oid_klass_loaders = {
'w': lambda oid, database_name=None: None,
}
}
def get_refs(a_pickle):
"""Return oid and class information for references in a pickle
......
......@@ -34,6 +34,7 @@ from random import randint
from .. import utils
class BasicStorage(object):
def checkBasics(self):
self.assertEqual(self._storage.lastTransaction(), ZERO)
......@@ -165,13 +166,13 @@ class BasicStorage(object):
def checkLen(self):
# len(storage) reports the number of objects.
# check it is zero when empty
self.assertEqual(len(self._storage),0)
self.assertEqual(len(self._storage), 0)
# check it is correct when the storage contains two object.
# len may also be zero, for storages that do not keep track
# of this number
self._dostore(data=MinPO(22))
self._dostore(data=MinPO(23))
self.assertTrue(len(self._storage) in [0,2])
self.assertTrue(len(self._storage) in [0, 2])
def checkGetSize(self):
self._dostore(data=MinPO(25))
......@@ -208,7 +209,8 @@ class BasicStorage(object):
def _do_store_in_separate_thread(self, oid, revid, voted):
# We'll run the competing trans in a separate thread:
thread = threading.Thread(name='T2',
target=self._dostore, args=(oid,), kwargs=dict(revid=revid))
target=self._dostore, args=(oid,),
kwargs=dict(revid=revid))
thread.daemon = True
thread.start()
thread.join(.1)
......@@ -218,9 +220,9 @@ class BasicStorage(object):
oid = b'\0\0\0\0\0\0\0\xf0'
tid = self._dostore(oid)
tid2 = self._dostore(oid, revid=tid)
data = b'cpersistent\nPersistent\nq\x01.N.' # a simple persistent obj
data = b'cpersistent\nPersistent\nq\x01.N.' # a simple persistent obj
#----------------------------------------------------------------------
# ---------------------------------------------------------------------
# stale read
t = TransactionMetaData()
self._storage.tpc_begin(t)
......@@ -233,12 +235,12 @@ class BasicStorage(object):
self.assertEqual(v.oid, oid)
self.assertEqual(v.serials, (tid2, tid))
else:
if 0: self.assertTrue(False, "No conflict error")
if 0:
self.assertTrue(False, "No conflict error")
self._storage.tpc_abort(t)
#----------------------------------------------------------------------
# ---------------------------------------------------------------------
# non-stale read, no stress. :)
t = TransactionMetaData()
self._storage.tpc_begin(t)
......@@ -248,7 +250,7 @@ class BasicStorage(object):
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
#----------------------------------------------------------------------
# ---------------------------------------------------------------------
# non-stale read, competition after vote. The competing
# transaction must produce a tid > this transaction's tid
t = TransactionMetaData()
......@@ -268,7 +270,7 @@ class BasicStorage(object):
utils.load_current(
self._storage, b'\0\0\0\0\0\0\0\xf3')[1])
#----------------------------------------------------------------------
# ---------------------------------------------------------------------
# non-stale competing trans after checkCurrentSerialInTransaction
t = TransactionMetaData()
self._storage.tpc_begin(t)
......@@ -286,7 +288,7 @@ class BasicStorage(object):
try:
self._storage.tpc_vote(t)
except POSException.ReadConflictError:
thread.join() # OK :)
thread.join() # OK :)
else:
self._storage.tpc_finish(t)
thread.join()
......@@ -295,7 +297,6 @@ class BasicStorage(object):
tid4 >
utils.load_current(self._storage, b'\0\0\0\0\0\0\0\xf4')[1])
def check_tid_ordering_w_commit(self):
# It's important that storages always give a consistent
......@@ -322,6 +323,7 @@ class BasicStorage(object):
self._storage.tpc_vote(t)
to_join = []
def run_in_thread(func):
t = threading.Thread(target=func)
t.daemon = True
......@@ -330,6 +332,7 @@ class BasicStorage(object):
started = threading.Event()
finish = threading.Event()
@run_in_thread
def commit():
def callback(tid):
......@@ -349,7 +352,6 @@ class BasicStorage(object):
attempts.append(1)
attempts_cond.notify_all()
@run_in_thread
def load():
update_attempts()
......@@ -360,6 +362,7 @@ class BasicStorage(object):
if hasattr(self._storage, 'getTid'):
expected_attempts += 1
@run_in_thread
def getTid():
update_attempts()
......@@ -367,6 +370,7 @@ class BasicStorage(object):
if hasattr(self._storage, 'lastInvalidations'):
expected_attempts += 1
@run_in_thread
def lastInvalidations():
update_attempts()
......@@ -378,7 +382,7 @@ class BasicStorage(object):
while len(attempts) < expected_attempts:
attempts_cond.wait()
time.sleep(.01) # for good measure :)
time.sleep(.01) # for good measure :)
finish.set()
for t in to_join:
......@@ -389,15 +393,16 @@ class BasicStorage(object):
for m, tid in results.items():
self.assertEqual(tid, tids[1])
# verify storage/Connection for race in between load/open and local invalidations.
# verify storage/Connection for race in between load/open and local
# invalidations.
# https://github.com/zopefoundation/ZEO/issues/166
# https://github.com/zopefoundation/ZODB/issues/290
@with_high_concurrency
def check_race_loadopen_vs_local_invalidate(self):
db = DB(self._storage)
# init initializes the database with two integer objects - obj1/obj2
# init initializes the database with two integer objects - obj1/obj2
# that are set to 0.
def init():
transaction.begin()
......@@ -417,6 +422,7 @@ class BasicStorage(object):
# cache is not stale.
failed = threading.Event()
failure = [None]
def verify():
transaction.begin()
zconn = db.open()
......@@ -433,10 +439,12 @@ class BasicStorage(object):
v1 = obj1.value
v2 = obj2.value
if v1 != v2:
failure[0] = "verify: obj1.value (%d) != obj2.value (%d)" % (v1, v2)
failure[0] = "verify: obj1.value (%d) != obj2.value (%d)" % (
v1, v2)
failed.set()
transaction.abort() # we did not changed anything; also fails with commit
# we did not change anything; also fails with commit:
transaction.abort()
zconn.close()
# modify changes obj1/obj2 by doing `objX.value += 1`.
......@@ -457,25 +465,27 @@ class BasicStorage(object):
transaction.commit()
zconn.close()
# xrun runs f in a loop until either N iterations, or until failed is set.
# xrun runs f in a loop until either N iterations, or until failed is
# set.
def xrun(f, N):
try:
for i in range(N):
#print('%s.%d' % (f.__name__, i))
# print('%s.%d' % (f.__name__, i))
f()
if failed.is_set():
break
except:
except: # noqa: E722 do not use bare 'except'
failed.set()
raise
# loop verify and modify concurrently.
init()
N = 500
tverify = threading.Thread(name='Tverify', target=xrun, args=(verify, N))
tmodify = threading.Thread(name='Tmodify', target=xrun, args=(modify, N))
tverify = threading.Thread(
name='Tverify', target=xrun, args=(verify, N))
tmodify = threading.Thread(
name='Tmodify', target=xrun, args=(modify, N))
tverify.start()
tmodify.start()
tverify.join(60)
......@@ -484,13 +494,13 @@ class BasicStorage(object):
if failed.is_set():
self.fail(failure[0])
# client-server storages like ZEO, NEO and RelStorage allow several storage
# clients to be connected to a single storage server.
#
# For client-server storages test subclasses should implement
# _new_storage_client to return a new storage client that is connected to the
# same storage server self._storage is connected to.
def _new_storage_client(self):
raise NotImplementedError
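# Editor's note: a hedged sketch of how a client-server test subclass might
# provide this hook. It assumes a ZEO-style setup; the server-address
# attribute and the test class name are hypothetical, not part of this change:
#
#     from ZEO.ClientStorage import ClientStorage
#
#     class ZEOBasicStorageTests(BasicStorage, ...):
#         def _new_storage_client(self):
#             # connect a fresh client to the same server as self._storage
#             return ClientStorage(self._server_addr)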
......@@ -510,10 +520,12 @@ class BasicStorage(object):
# the test will be skipped from main thread because dbopen is
# first used in init on the main thread before any other thread
# is spawned.
self.skipTest("%s does not implement _new_storage_client" % type(self))
self.skipTest(
"%s does not implement _new_storage_client" % type(self))
return DB(zstor)
# init initializes the database with two integer objects - obj1/obj2 that are set to 0.
# init initializes the database with two integer objects - obj1/obj2
# that are set to 0.
def init():
db = dbopen()
......@@ -529,23 +541,27 @@ class BasicStorage(object):
db.close()
# we'll run 8 T workers concurrently. As of 20210416, due to race conditions
# in ZEO, it triggers the bug where T sees stale obj2 with obj1.value != obj2.value
# we'll run 8 T workers concurrently. As of 20210416, due to race
# conditions in ZEO, it triggers the bug where T sees stale obj2 with
# obj1.value != obj2.value
#
# The probability to reproduce the bug is significantly reduced with
# decreasing n(workers): almost never with nwork=2 and sometimes with nwork=4.
# decreasing n(workers): almost never with nwork=2 and sometimes with
# nwork=4.
nwork = 8
# T is a worker that accesses obj1/obj2 in a loop and verifies
# the `obj1.value == obj2.value` invariant.
#
# access to obj1 is organized to always trigger loading from zstor.
# access to obj2 goes through zconn cache and so verifies whether the cache is not stale.
# access to obj2 goes through zconn cache and so verifies whether the
# cache is not stale.
#
# Once in a while T tries to modify obj{1,2}.value maintaining the invariant as
# test source of changes for other workers.
# Once in a while T tries to modify obj{1,2}.value maintaining the
# invariant as test source of changes for other workers.
failed = threading.Event()
failure = [None] * nwork # [tx] is failure from T(tx)
failure = [None] * nwork # [tx] is failure from T(tx)
def T(tx, N):
db = dbopen()
......@@ -565,37 +581,38 @@ class BasicStorage(object):
i1 = obj1.value
i2 = obj2.value
if i1 != i2:
#print('FAIL')
failure[tx] = "T%s: obj1.value (%d) != obj2.value (%d)" % (tx, i1, i2)
# print('FAIL')
failure[tx] = (
"T%s: obj1.value (%d) != obj2.value (%d)" % (
tx, i1, i2))
failed.set()
# change objects once in a while
if randint(0,4) == 0:
#print("T%s: modify" % tx)
if randint(0, 4) == 0:
# print("T%s: modify" % tx)
obj1.value += 1
obj2.value += 1
try:
transaction.commit()
except POSException.ConflictError:
#print('conflict -> ignore')
# print('conflict -> ignore')
transaction.abort()
zconn.close()
try:
for i in range(N):
#print('T%s.%d' % (tx, i))
# print('T%s.%d' % (tx, i))
t_()
if failed.is_set():
break
except:
except: # noqa: E722 do not use bare 'except'
failed.set()
raise
finally:
db.close()
# run the workers concurrently.
init()
......
......@@ -19,7 +19,8 @@ from ZODB.POSException import ConflictError, UndoError
from persistent import Persistent
from transaction import TransactionManager
from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
from ZODB.tests.StorageTestBase import zodb_pickle
class PCounter(Persistent):
......@@ -42,19 +43,23 @@ class PCounter(Persistent):
# Insecurity: What if _p_resolveConflict _thinks_ it resolved the
# conflict, but did something wrong?
class PCounter2(PCounter):
def _p_resolveConflict(self, oldState, savedState, newState):
raise ConflictError
class PCounter3(PCounter):
def _p_resolveConflict(self, oldState, savedState, newState):
raise AttributeError("no attribute (testing conflict resolution)")
class PCounter4(PCounter):
def _p_resolveConflict(self, oldState, savedState):
raise RuntimeError("Can't get here; not enough args")
class ConflictResolvingStorage(object):
def checkResolve(self, resolvable=True):
......@@ -92,7 +97,6 @@ class ConflictResolvingStorage(object):
def checkZClassesArentResolved(self):
from ZODB.ConflictResolution import find_global, BadClassName
dummy_class_tuple = ('*foobar', ())
self.assertRaises(BadClassName, find_global, '*foobar', ())
def checkBuggyResolve1(self):
......@@ -108,7 +112,7 @@ class ConflictResolvingStorage(object):
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self.assertRaises(ConflictError,
self._dostoreNP,
oid, revid=revid1, data=zodb_pickle(obj))
......@@ -126,11 +130,12 @@ class ConflictResolvingStorage(object):
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
self.assertRaises(ConflictError,
self._dostoreNP,
oid, revid=revid1, data=zodb_pickle(obj))
class ConflictResolvingTransUndoStorage(object):
def checkUndoConflictResolution(self):
......@@ -145,7 +150,7 @@ class ConflictResolvingTransUndoStorage(object):
obj.inc()
revid_b = self._dostore(oid, revid=revid_a, data=obj)
obj.inc()
revid_c = self._dostore(oid, revid=revid_b, data=obj)
self._dostore(oid, revid=revid_b, data=obj)
# Start the undo
info = self._storage.undoInfo()
tid = info[1]['id']
......@@ -167,7 +172,7 @@ class ConflictResolvingTransUndoStorage(object):
obj.inc()
revid_b = self._dostore(oid, revid=revid_a, data=obj)
obj.inc()
revid_c = self._dostore(oid, revid=revid_b, data=obj)
self._dostore(oid, revid=revid_b, data=obj)
# Start the undo
info = self._storage.undoInfo()
tid = info[1]['id']
......
......@@ -23,6 +23,7 @@ from ZODB.utils import load_current
from .StorageTestBase import StorageTestBase
class FileStorageCorruptTests(StorageTestBase):
def setUp(self):
......
......@@ -21,6 +21,7 @@ import sys
from time import time, sleep
from ZODB.tests.MinPO import MinPO
class HistoryStorage(object):
def checkSimpleHistory(self):
self._checkHistory((11, 12, 13))
......@@ -29,7 +30,7 @@ class HistoryStorage(object):
start = time()
# Store a couple of revisions of the object
oid = self._storage.new_oid()
self.assertRaises(KeyError,self._storage.history,oid)
self.assertRaises(KeyError, self._storage.history, oid)
revids = [None]
for data in data:
if sys.platform == 'win32':
......
......@@ -32,6 +32,7 @@ except ImportError:
# Py3: zip() already returns an iterable.
pass
class IteratorCompare(object):
def iter_verify(self, txniter, revids, val0):
......@@ -66,14 +67,14 @@ class IteratorStorage(IteratorCompare):
def checkUndoZombie(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(94))
self._dostore(oid, data=MinPO(94))
# Get the undo information
info = self._storage.undoInfo()
tid = info[0]['id']
# Undo the creation of the object, rendering it a zombie
t = TransactionMetaData()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# Now attempt to iterate over the storage
......@@ -95,10 +96,10 @@ class IteratorStorage(IteratorCompare):
# always return x.
class ext(dict):
def __reduce__(self):
return dict,(tuple(self.items()),)
return dict, (tuple(self.items()),)
ext = ext(foo=1)
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(1), extension=ext)
self._dostore(oid, data=MinPO(1), extension=ext)
txn, = self._storage.iterator()
self.assertEqual(txn.extension, ext)
try:
......
......@@ -17,6 +17,7 @@ from ZODB.utils import load_current
SHORT_DELAY = 0.01
class TestThread(threading.Thread):
"""Base class for defining threads that run from unittest.
......@@ -35,7 +36,7 @@ class TestThread(threading.Thread):
def run(self):
try:
self.runtest()
except:
except: # noqa: E722 do not use bare 'except'
self._exc_info = sys.exc_info()
def join(self, timeout=None):
......@@ -44,6 +45,7 @@ class TestThread(threading.Thread):
raise six.reraise(
self._exc_info[0], self._exc_info[1], self._exc_info[2])
class ZODBClientThread(TestThread):
__super_init = TestThread.__init__
......@@ -107,6 +109,7 @@ class ZODBClientThread(TestThread):
raise ConflictError("Exceeded %d attempts to read" % MAXRETRIES)
class StorageClientThread(TestThread):
__super_init = TestThread.__init__
......@@ -159,6 +162,7 @@ class StorageClientThread(TestThread):
self.pause()
self.oids[oid] = revid
class ExtStorageClientThread(StorageClientThread):
def runtest(self):
......@@ -211,6 +215,7 @@ class ExtStorageClientThread(StorageClientThread):
for obj in iter:
pass
class MTStorage(object):
"Test a storage with multiple client threads executing concurrently."
......
......@@ -110,7 +110,7 @@ class MVCCMappingStorage(MappingStorage):
self._polled_tid = self._ltid = new_tid
return list(changed_oids)
def tpc_finish(self, transaction, func = lambda tid: None):
def tpc_finish(self, transaction, func=lambda tid: None):
self._data_snapshot = None
with self._main_lock:
return MappingStorage.tpc_finish(self, transaction, func)
......
......@@ -14,12 +14,13 @@
"""A minimal persistent object to use for tests"""
from persistent import Persistent
class MinPO(Persistent):
def __init__(self, value=None):
self.value = value
def __cmp__(self, aMinPO):
return cmp(self.value, aMinPO.value)
return cmp(self.value, aMinPO.value) # noqa: F821 undefined name 'cmp'
def __hash__(self):
return hash(self.value)
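# Editor's note: `cmp` does not exist on Python 3 (hence the noqa above). A
# Python-3-friendly equivalent would use rich comparison methods instead,
# e.g. (illustrative only, not part of this change):
#
#     def __eq__(self, other):
#         return self.value == other.value
#
#     def __lt__(self, other):
#         return self.value < other.value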
......
......@@ -31,7 +31,6 @@ import transaction
import ZODB.interfaces
import ZODB.tests.util
from ZODB.tests.util import time_monotonically_increases
import zope.testing.setupstack
from ZODB.utils import load_current
......@@ -81,6 +80,8 @@ class C(Persistent):
# serialize the persistent id of the object instead of the object's state.
# That sets the pickle up for proper sniffing by the referencesf machinery.
# Fun, huh?
def dumps(obj):
def getpersid(obj):
if hasattr(obj, 'getoid'):
......@@ -92,6 +93,7 @@ def dumps(obj):
p.dump(None)
return s.getvalue()
def pdumps(obj):
s = BytesIO()
p = Pickler(s, _protocol)
......@@ -245,9 +247,8 @@ class PackableStorage(PackableStorageBase):
# True if we got beyond this line, False if it raised an
# exception (one possible Conflict cause):
# self.root[index].value = MinPO(j)
def cmp_by_time(a, b):
return cmp((a[1], a[0]), (b[1], b[0]))
outcomes.sort(cmp_by_time)
outcomes.sort(key=lambda x: (x[1], x[0]))
counts = [0] * 4
for outcome in outcomes:
n = len(outcome)
......@@ -528,6 +529,7 @@ class PackableStorage(PackableStorageBase):
eq(pobj.getoid(), oid2)
eq(pobj.value, 11)
class PackableStorageWithOptionalGC(PackableStorage):
def checkPackAllRevisionsNoGC(self):
......@@ -569,7 +571,6 @@ class PackableStorageWithOptionalGC(PackableStorage):
self._storage.loadSerial(oid, revid3)
class PackableUndoStorage(PackableStorageBase):
def checkPackUnlinkedFromRoot(self):
......@@ -716,13 +717,15 @@ class PackableUndoStorage(PackableStorageBase):
self._dostoreNP(oid2, revid=revid22,
data=pdumps(obj2), description="2-5")
# Now pack
self.assertEqual(6,len(self._storage.undoLog()))
self.assertEqual(6, len(self._storage.undoLog()))
print('\ninitial undoLog was')
for r in self._storage.undoLog(): print(r)
for r in self._storage.undoLog():
print(r)
self._storage.pack(packtime, referencesf)
# The undo log contains only two undoable transactions.
print('\nafter packing undoLog was')
for r in self._storage.undoLog(): print(r)
for r in self._storage.undoLog():
print(r)
# what can we assert about that?
......@@ -774,6 +777,7 @@ class ClientThread(TestThread):
conn.close()
class ElapsedTimer(object):
def __init__(self, start_time):
self.start_time = start_time
......
......@@ -15,12 +15,13 @@
from ZODB.utils import load_current
class PersistentStorage(object):
def checkUpdatesPersist(self):
oids = []
def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid):
def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid): # noqa: E741 E501 ambiguous variable name 'l' and line too long
oid = new_oid()
l.append(oid)
return oid
......
......@@ -16,6 +16,7 @@ from ZODB.POSException import ReadOnlyError, Unsupported
from ZODB.utils import load_current
class ReadOnlyStorage(object):
def _create_data(self):
......
......@@ -41,9 +41,9 @@ class RecoveryStorage(IteratorDeepCompare):
db = DB(self._storage)
c = db.open()
r = c.root()
obj = r["obj1"] = MinPO(1)
r["obj1"] = MinPO(1)
transaction.commit()
obj = r["obj2"] = MinPO(1)
r["obj2"] = MinPO(1)
transaction.commit()
self._dst.copyTransactionsFrom(self._storage)
......@@ -57,7 +57,7 @@ class RecoveryStorage(IteratorDeepCompare):
# Get the last transaction and its record iterator. Record iterators
# can't be accessed out-of-order, so we need to do this in a bit
# complicated way:
for final in it:
for final in it:
records = list(final)
self._dst.tpc_begin(final, final.tid, final.status)
......@@ -151,7 +151,7 @@ class RecoveryStorage(IteratorDeepCompare):
tid = info[0]['id']
t = TransactionMetaData()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
......@@ -175,7 +175,7 @@ class RecoveryStorage(IteratorDeepCompare):
tid = info[0]['id']
t = TransactionMetaData()
self._storage.tpc_begin(t)
oids = self._storage.undo(tid, t)
self._storage.undo(tid, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
......
......@@ -107,7 +107,6 @@ class RevisionStorage(object):
self.assertEqual(start, revs[11])
self.assertEqual(end, revs[12])
# Unsure: Is it okay to assume everyone testing against RevisionStorage
# implements undo?
......@@ -142,6 +141,7 @@ class RevisionStorage(object):
def checkLoadBeforeConsecutiveTids(self):
eq = self.assertEqual
oid = self._storage.new_oid()
def helper(tid, revid, x):
data = zodb_pickle(MinPO(x))
t = TransactionMetaData()
......@@ -151,13 +151,13 @@ class RevisionStorage(object):
# Finish the transaction
self._storage.tpc_vote(t)
newrevid = self._storage.tpc_finish(t)
except:
except: # noqa: E722 do not use bare 'except'
self._storage.tpc_abort(t)
raise
return newrevid
revid1 = helper(1, None, 1)
revid2 = helper(2, revid1, 2)
revid3 = helper(3, revid2, 3)
helper(3, revid2, 3)
data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
eq(zodb_unpickle(data), MinPO(1))
eq(u64(start_tid), 1)
......@@ -167,7 +167,7 @@ class RevisionStorage(object):
eq = self.assertEqual
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
revid1 = self._dostore(oid1)
self._dostore(oid1)
revid2 = self._dostore(oid2)
results = self._storage.loadBefore(oid2, revid2)
eq(results, None)
......
......@@ -31,6 +31,7 @@ import ZODB.tests.util
ZERO = z64
def snooze():
# In Windows, it's possible that two successive time.time() calls return
# the same value. Tim guarantees that time never runs backwards. You
......@@ -40,6 +41,7 @@ def snooze():
while now == time.time():
time.sleep(0.1)
def _persistent_id(obj):
oid = getattr(obj, "_p_oid", None)
if getattr(oid, "__get__", None) is not None:
......@@ -47,6 +49,7 @@ def _persistent_id(obj):
else:
return oid
def zodb_pickle(obj):
"""Create a pickle in the format expected by ZODB."""
f = BytesIO()
......@@ -65,10 +68,12 @@ def zodb_pickle(obj):
p.dump(state)
return f.getvalue()
def persistent_load(pid):
# helper for zodb_unpickle
return "ref to %s.%s oid=%s" % (pid[1][0], pid[1][1], u64(pid[0]))
def zodb_unpickle(data):
"""Unpickle an object stored using the format expected by ZODB."""
f = BytesIO(data)
......@@ -101,6 +106,7 @@ def zodb_unpickle(data):
inst.__setstate__(state)
return inst
def import_helper(name):
__import__(name)
return sys.modules[name]
......@@ -124,7 +130,8 @@ class StorageTestBase(ZODB.tests.util.TestCase):
ZODB.tests.util.TestCase.tearDown(self)
def _dostore(self, oid=None, revid=None, data=None,
already_pickled=0, user=None, description=None, extension=None):
already_pickled=0, user=None, description=None,
extension=None):
"""Do a complete storage transaction. The defaults are:
- oid=None, ask the storage for a new oid
......@@ -152,11 +159,11 @@ class StorageTestBase(ZODB.tests.util.TestCase):
try:
self._storage.tpc_begin(t)
# Store an object
r1 = self._storage.store(oid, revid, data, '', t)
self._storage.store(oid, revid, data, '', t)
# Finish the transaction
r2 = self._storage.tpc_vote(t)
self._storage.tpc_vote(t)
revid = self._storage.tpc_finish(t)
except:
except: # noqa: E722 do not use bare 'except'
self._storage.tpc_abort(t)
raise
return revid
......
......@@ -69,6 +69,7 @@ OID = "\000" * 8
SERIALNO = "\000" * 8
TID = "\000" * 8
class SynchronizedStorage(object):
def verifyNotCommitting(self, callable, *args):
......
......@@ -21,7 +21,6 @@ from six import PY3
from persistent import Persistent
import transaction
from transaction import Transaction
from ZODB import POSException
from ZODB.Connection import TransactionMetaData
......@@ -37,6 +36,7 @@ from ZODB.tests.StorageTestBase import ZERO
class C(Persistent):
pass
def snooze():
# In Windows, it's possible that two successive time.time() calls return
# the same value. Tim guarantees that time never runs backwards. You
......@@ -46,6 +46,7 @@ def snooze():
while now == time.time():
time.sleep(0.1)
def listeq(L1, L2):
"""Return True if L1.sort() == L2.sort()
......@@ -53,6 +54,7 @@ def listeq(L1, L2):
"""
return sorted(L1) == sorted(L2)
class TransactionalUndoStorage(object):
def _multi_obj_transaction(self, objs):
......@@ -218,7 +220,6 @@ class TransactionalUndoStorage(object):
def checkTwoObjectUndoAtOnce(self):
# Convenience
eq = self.assertEqual
unless = self.assertTrue
p30, p31, p32, p50, p51, p52 = map(zodb_pickle,
map(MinPO,
(30, 31, 32, 50, 51, 52)))
......@@ -470,6 +471,7 @@ class TransactionalUndoStorage(object):
root = cn.root()
pack_times = []
def set_pack_time():
pack_times.append(time.time())
snooze()
......@@ -521,7 +523,6 @@ class TransactionalUndoStorage(object):
cn.close()
db.close()
def checkPackAfterUndoManyTimes(self):
db = DB(self._storage)
cn = db.open()
......@@ -664,7 +665,7 @@ class TransactionalUndoStorage(object):
t = transaction.get()
t.note(u't1')
t.setExtendedInfo('k2', 'this is transaction metadata')
t.setUser(u'u3',path=u'p3')
t.setUser(u'u3', path=u'p3')
db = DB(self._storage)
conn = db.open()
try:
......@@ -673,9 +674,9 @@ class TransactionalUndoStorage(object):
root['obj'] = o1
txn = transaction.get()
txn.commit()
l = self._storage.undoLog()
self.assertEqual(len(l),2)
d = l[0]
log = self._storage.undoLog()
self.assertEqual(len(log), 2)
d = log[0]
self.assertEqual(d['description'], b't1')
self.assertEqual(d['k2'], 'this is transaction metadata')
self.assertEqual(d['user_name'], b'p3 u3')
......@@ -724,7 +725,7 @@ class TransactionalUndoStorage(object):
# Try a slice that doesn't start at 0.
oddball = info_func(first=11, last=17)
self.assertEqual(len(oddball), 17-11)
self.assertEqual(oddball, allofem[11 : 11+len(oddball)])
self.assertEqual(oddball, allofem[11: 11+len(oddball)])
# And another way to spell the same thing.
redundant = info_func(first=11, last=-6)
......@@ -754,10 +755,10 @@ class TransactionalUndoStorage(object):
for i in range(4):
with db.transaction() as conn:
conn.transaction_manager.get().note(
(str if PY3 else unicode)(i))
(str if PY3 else unicode)(i)) # noqa: F821 undef name
conn.root.x.inc()
ids = [l['id'] for l in db.undoLog(1, 3)]
ids = [log['id'] for log in db.undoLog(1, 3)]
if reverse:
ids.reverse()
......
......@@ -23,9 +23,11 @@ from ZODB import DB
from persistent import Persistent
class P(Persistent):
pass
def create_dangling_ref(db):
rt = db.open().root()
......@@ -56,11 +58,13 @@ def create_dangling_ref(db):
transaction.get().note(u"set child on o2")
transaction.commit()
def main():
fs = FileStorage(u"dangle.fs")
db = DB(fs)
create_dangling_ref(db)
db.close()
if __name__ == "__main__":
main()
......@@ -17,17 +17,17 @@ import ZODB.utils
import zope.interface
from binascii import hexlify, unhexlify
@zope.interface.implementer(ZODB.interfaces.IStorageWrapper)
class HexStorage(object):
copied_methods = (
'close', 'getName', 'getSize', 'history', 'isReadOnly',
'lastTransaction', 'new_oid', 'sortKey',
'tpc_abort', 'tpc_begin', 'tpc_finish', 'tpc_vote',
'loadBlob', 'openCommittedBlobFile', 'temporaryDirectory',
'supportsUndo', 'undo', 'undoLog', 'undoInfo',
)
'close', 'getName', 'getSize', 'history', 'isReadOnly',
'lastTransaction', 'new_oid', 'sortKey',
'tpc_abort', 'tpc_begin', 'tpc_finish', 'tpc_vote',
'loadBlob', 'openCommittedBlobFile', 'temporaryDirectory',
'supportsUndo', 'undo', 'undoLog', 'undoInfo',
)
def __init__(self, base):
self.base = base
......@@ -122,6 +122,7 @@ class HexStorage(object):
def copyTransactionsFrom(self, other):
ZODB.blob.copyTransactionsFromTo(other, self)
class ServerHexStorage(HexStorage):
"""Use on ZEO storage server when Hex is used on client
......@@ -132,7 +133,8 @@ class ServerHexStorage(HexStorage):
copied_methods = HexStorage.copied_methods + (
'load', 'loadBefore', 'loadSerial', 'store', 'restore',
'iterator', 'storeBlob', 'restoreBlob', 'record_iternext',
)
)
class Transaction(object):
......@@ -149,6 +151,7 @@ class Transaction(object):
def __getattr__(self, name):
return getattr(self.__trans, name)
class ZConfigHex(object):
_factory = HexStorage
......@@ -161,6 +164,7 @@ class ZConfigHex(object):
base = self.config.base.open()
return self._factory(base)
class ZConfigServerHex(ZConfigHex):
_factory = ServerHexStorage
......@@ -71,6 +71,7 @@ $Id: loggingsupport.py 28349 2004-11-06 00:10:32Z tim_one $
import logging
class Handler(logging.Handler):
def __init__(self, *names, **kw):
......@@ -111,8 +112,8 @@ class Handler(logging.Handler):
if line.strip()])
)
)
for record in self.records]
)
for record in self.records]
)
class InstalledHandler(Handler):
......
......@@ -14,6 +14,7 @@
"""Sample objects for use in tests
"""
class DataManager(object):
"""Sample data manager
......@@ -148,7 +149,7 @@ class DataManager(object):
def _checkTransaction(self, transaction):
if (transaction is not self.transaction
and self.transaction is not None):
and self.transaction is not None):
raise TypeError("Transaction missmatch",
transaction, self.transaction)
......@@ -384,6 +385,7 @@ class DataManager(object):
self.sp += 1
return Rollback(self)
class Rollback(object):
def __init__(self, dm):
......
from __future__ import print_function
import time
import string
import getopt
import os
import ZODB.FileStorage
import ZODB
import sys
import transaction
import persistent
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
......@@ -12,7 +21,7 @@ from __future__ import print_function
# FOR A PARTICULAR PURPOSE
#
##############################################################################
usage="""Test speed of a ZODB storage
usage = """Test speed of a ZODB storage
Options:
......@@ -36,92 +45,100 @@ Options:
-M Output means only
"""
import sys, os, getopt, string, time
sys.path.insert(0, os.getcwd())
import ZODB, ZODB.FileStorage
import persistent
import transaction
class P(persistent.Persistent): pass
class P(persistent.Persistent):
pass
def main(args):
opts, args = getopt.getopt(args, 'zd:n:Ds:LM')
z=s=None
data=sys.argv[0]
nrep=5
minimize=0
detailed=1
z = s = None
data = sys.argv[0]
nrep = 5
minimize = 0
detailed = 1
for o, v in opts:
if o=='-n': nrep=string.atoi(v)
elif o=='-d': data=v
elif o=='-s': s=v
elif o=='-z':
if o == '-n':
nrep = string.atoi(v)
elif o == '-d':
data = v
elif o == '-s':
s = v
elif o == '-z':
global zlib
import zlib
z=compress
elif o=='-L':
minimize=1
elif o=='-M':
detailed=0
elif o=='-D':
z = compress
elif o == '-L':
minimize = 1
elif o == '-M':
detailed = 0
elif o == '-D':
global debug
os.environ['STUPID_LOG_FILE']=''
os.environ['STUPID_LOG_SEVERITY']='-999'
os.environ['STUPID_LOG_FILE'] = ''
os.environ['STUPID_LOG_SEVERITY'] = '-999'
if s:
s=__import__(s, globals(), globals(), ('__doc__',))
s=s.Storage
s = __import__(s, globals(), globals(), ('__doc__',))
s = s.Storage
else:
s=ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)
s = ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1)
with open(data) as fp:
data = fp.read()
db=ZODB.DB(s,
# disable cache deactivation
cache_size=4000,
cache_deactivate_after=6000,)
db = ZODB.DB(s,
# disable cache deactivation
cache_size=4000,
cache_deactivate_after=6000,)
results={1:0, 10:0, 100:0, 1000:0}
results = {1: 0, 10: 0, 100: 0, 1000: 0}
for j in range(nrep):
for r in 1, 10, 100, 1000:
t=time.time()
jar=db.open()
t = time.time()
jar = db.open()
transaction.begin()
rt=jar.root()
key='s%s' % r
if key in rt: p=rt[key]
else: rt[key]=p=P()
rt = jar.root()
key = 's%s' % r
if key in rt:
p = rt[key]
else:
rt[key] = p = P()
for i in range(r):
if z is not None: d=z(data)
else: d=data
v=getattr(p, str(i), P())
v.d=d
setattr(p,str(i),v)
if z is not None:
d = z(data)
else:
d = data
v = getattr(p, str(i), P())
v.d = d
setattr(p, str(i), v)
transaction.commit()
jar.close()
t=time.time()-t
t = time.time()-t
if detailed:
sys.stderr.write("%s\t%s\t%.4f\n" % (j, r, t))
sys.stdout.flush()
results[r]=results[r]+t
rt=d=p=v=None # release all references
results[r] = results[r]+t
rt = d = p = v = None # release all references
if minimize:
time.sleep(3)
jar.cacheMinimize(3)
if detailed: print('-'*24)
if detailed:
print('-'*24)
for r in 1, 10, 100, 1000:
t=results[r]/nrep
t = results[r]/nrep
sys.stderr.write("mean:\t%s\t%.4f\t%.4f (s/o)\n" % (r, t, t/r))
db.close()
def compress(s):
c=zlib.compressobj()
o=c.compress(s)
c = zlib.compressobj()
o = c.compress(s)
return o+c.flush()
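For clarity, the compress() helper above simply emits one complete zlib stream per call; a hedged sanity sketch of the same pattern round-tripping (payload is an arbitrary illustrative value):
>>> import zlib
>>> payload = b'x' * 1000
>>> c = zlib.compressobj()
>>> blob = c.compress(payload) + c.flush()
>>> zlib.decompress(blob) == payload
True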
if __name__=='__main__': main(sys.argv[1:])
if __name__ == '__main__':
main(sys.argv[1:])
......@@ -102,6 +102,3 @@ class Tests(unittest.TestCase):
def test_suite():
return unittest.makeSuite(Tests)
if __name__=='__main__':
unittest.main(defaultTest='test_suite')
......@@ -25,6 +25,7 @@ else:
from doctest import DocTestSuite
from ZODB.tests.util import DB, checker
def test_integration():
r"""Test the integration of broken object support with the databse:
......@@ -90,10 +91,9 @@ def test_integration():
>>> ZODB.broken.broken_cache.clear()
"""
def test_suite():
return unittest.TestSuite((
DocTestSuite('ZODB.broken', checker=checker),
DocTestSuite(checker=checker),
))
if __name__ == '__main__': unittest.main()
))
......@@ -40,7 +40,7 @@ class CacheTestBase(ZODB.tests.util.TestCase):
ZODB.tests.util.TestCase.setUp(self)
store = ZODB.MappingStorage.MappingStorage()
self.db = ZODB.DB(store,
cache_size = self.CACHE_SIZE)
cache_size=self.CACHE_SIZE)
self.conns = []
def tearDown(self):
......@@ -73,9 +73,10 @@ class CacheTestBase(ZODB.tests.util.TestCase):
transaction.commit()
# CantGetRidOfMe is used by checkMinimizeTerminates.
make_trouble = True
class CantGetRidOfMe(MinPO):
def __init__(self, value):
MinPO.__init__(self, value)
......@@ -88,6 +89,7 @@ class CantGetRidOfMe(MinPO):
if make_trouble:
self.an_attribute
class DBMethods(CacheTestBase):
def setUp(self):
......@@ -194,6 +196,7 @@ class DBMethods(CacheTestBase):
c = self.conns[0]._cache
c.klass_items()
class LRUCacheTests(CacheTestBase):
def testLRU(self):
......@@ -205,30 +208,30 @@ class LRUCacheTests(CacheTestBase):
self.db.setCacheSize(CACHE_SIZE)
c = self.db.open()
r = c.root()
l = {}
l_ = {}
# the root is the only thing in the cache, because all the
# other objects are new
self.assertEqual(len(c._cache), 1)
# run several transactions
for t in range(5):
for i in range(dataset_size):
l[(t,i)] = r[i] = MinPO(i)
l_[(t, i)] = r[i] = MinPO(i)
transaction.commit()
# commit() will register the objects, placing them in the
# cache. at the end of commit, the cache will be reduced
# down to CACHE_SIZE items
if len(l)>CACHE_SIZE:
if len(l_) > CACHE_SIZE:
self.assertEqual(c._cache.ringlen(), CACHE_SIZE)
for i in range(dataset_size):
# Check objects added in the first two transactions.
# They must all be ghostified.
self.assertEqual(l[(0,i)]._p_changed, None)
self.assertEqual(l[(1,i)]._p_changed, None)
self.assertEqual(l_[(0, i)]._p_changed, None)
self.assertEqual(l_[(1, i)]._p_changed, None)
# Check objects added in the last two transactions.
# They must all still exist in memory, but have
# had their changes flushed
self.assertEqual(l[(3,i)]._p_changed, 0)
self.assertEqual(l[(4,i)]._p_changed, 0)
self.assertEqual(l_[(3, i)]._p_changed, 0)
self.assertEqual(l_[(4, i)]._p_changed, 0)
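The assertions above rely on persistent ghosting semantics: _p_changed is None marks a ghost whose state has been evicted, while _p_changed == 0 marks a loaded, unmodified object. A self-contained sketch (cache size and object count chosen only for illustration):
>>> import ZODB, transaction
>>> from ZODB.tests.MinPO import MinPO
>>> db = ZODB.DB(None, cache_size=5)
>>> conn = db.open()
>>> for i in range(20):
...     conn.root()[i] = MinPO(i)
>>> transaction.commit()
>>> conn.cacheMinimize()                 # ghostify everything in the cache
>>> conn.root()[0]._p_changed is None    # still a ghost
True
>>> conn.root()[0].value                 # attribute access loads the state
0
>>> conn.root()[0]._p_changed            # loaded, unmodified
False
>>> db.close()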
# Of the objects added in the middle transaction, most
# will have been ghostified. There is one cache slot
# that may be occupied by either one of those objects or
......@@ -257,7 +260,7 @@ class LRUCacheTests(CacheTestBase):
# The cache *usually* contains non-ghosts, so that the
# size normally exceeds the target size.
#self.assertEqual(d['size'], CACHE_SIZE)
# self.assertEqual(d['size'], CACHE_SIZE)
def testDetail(self):
CACHE_SIZE = 10
......@@ -288,7 +291,6 @@ class LRUCacheTests(CacheTestBase):
# This test really needs to be thought through and documented
# better.
for klass, count in self.db.cacheDetail():
if klass.endswith('MinPO'):
self.assertEqual(count, CONNS * CACHE_SIZE)
......@@ -307,13 +309,16 @@ class LRUCacheTests(CacheTestBase):
if details['state'] is None: # i.e., it's a ghost
self.assertTrue(details['rc'] > 0)
class StubDataManager(object):
def setklassstate(self, object):
pass
class StubObject(Persistent):
pass
class CacheErrors(unittest.TestCase):
def setUp(self):
......@@ -449,6 +454,7 @@ class CacheErrors(unittest.TestCase):
else:
self.fail("two objects with the same oid should have failed")
def test_basic_cache_size_estimation():
"""Make sure the basic accounting is correct:
......
......@@ -126,6 +126,7 @@ def database_xrefs_config():
False
"""
def multi_atabases():
r"""If there are multiple codb sections -> multidatabase
......@@ -195,6 +196,7 @@ def multi_atabases():
"""
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(
......
......@@ -44,7 +44,7 @@ checker = renormalizing.RENormalizing([
(re.compile("ZODB.POSException.ConflictError"), r"ConflictError"),
(re.compile("ZODB.POSException.ConnectionStateError"),
r"ConnectionStateError"),
])
])
class ConnectionDotAdd(ZODB.tests.util.TestCase):
......@@ -131,7 +131,7 @@ class ConnectionDotAdd(ZODB.tests.util.TestCase):
self.assertTrue(obj._p_jar is self.datamgr)
# This next assertTrue is covered by an assert in tpc_finish.
##self.assertTrue(not self.datamgr._added)
# self.assertTrue(not self.datamgr._added)
self.assertEqual(self.db.storage._stored, [oid])
self.assertEqual(self.db.storage._finished, [oid])
......@@ -148,7 +148,7 @@ class ConnectionDotAdd(ZODB.tests.util.TestCase):
storage = self.db.storage
self.assertTrue(obj._p_oid in storage._stored, "object was not stored")
self.assertTrue(subobj._p_oid in storage._stored,
"subobject was not stored")
"subobject was not stored")
self.assertTrue(member._p_oid in storage._stored,
"member was not stored")
self.assertTrue(self.datamgr._added_during_commit is None)
......@@ -176,8 +176,7 @@ class SetstateErrorLoggingTests(ZODB.tests.util.TestCase):
def setUp(self):
ZODB.tests.util.TestCase.setUp(self)
from ZODB.Connection import Connection
self.db = db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
self.db = databaseFromString("<zodb>\n<mappingstorage/>\n</zodb>")
self.datamgr = self.db.open()
self.object = StubObject()
self.datamgr.add(self.object)
......@@ -188,7 +187,6 @@ class SetstateErrorLoggingTests(ZODB.tests.util.TestCase):
self.handler.uninstall()
def test_closed_connection_wont_setstate(self):
oid = self.object._p_oid
self.object._p_deactivate()
self.datamgr.close()
self.assertRaises(
......@@ -476,6 +474,7 @@ class UserMethodTests(unittest.TestCase):
-1
"""
def doctest_transaction_retry_convenience():
"""
Simple test to verify integration with the transaction retry
......@@ -506,6 +505,7 @@ def doctest_transaction_retry_convenience():
0 2
"""
class InvalidationTests(unittest.TestCase):
# It's harder to write serious tests, because some of the critical
......@@ -588,6 +588,7 @@ class InvalidationTests(unittest.TestCase):
c2.root()['b'] = 1
s1 = c1._storage
l1 = s1._lock
@contextmanager
def beforeLock1():
s1._lock = l1
......@@ -601,6 +602,7 @@ class InvalidationTests(unittest.TestCase):
finally:
db.close()
def doctest_invalidateCache():
"""The invalidateCache method invalidates a connection's cache.
......@@ -655,6 +657,7 @@ def doctest_invalidateCache():
>>> db.close()
"""
def doctest_connection_root_convenience():
"""Connection root attributes can now be used as objects with attributes
......@@ -692,10 +695,12 @@ def doctest_connection_root_convenience():
<root: rather_long_name rather_long_name2 rather_long_name4 ...>
"""
class proper_ghost_initialization_with_empty__p_deactivate_class(Persistent):
def _p_deactivate(self):
pass
def doctest_proper_ghost_initialization_with_empty__p_deactivate():
"""
See https://bugs.launchpad.net/zodb/+bug/185066
......@@ -715,6 +720,7 @@ def doctest_proper_ghost_initialization_with_empty__p_deactivate():
"""
def doctest_readCurrent():
r"""
The connection's readCurrent method is called to provide a higher
......@@ -868,6 +874,7 @@ def doctest_readCurrent():
"""
def doctest_cache_management_of_subconnections():
"""Make that cache management works for subconnections.
......@@ -934,6 +941,7 @@ def doctest_cache_management_of_subconnections():
"""
class C_invalidations_of_new_objects_work_after_savepoint(Persistent):
def __init__(self):
self.settings = 1
......@@ -943,7 +951,8 @@ class C_invalidations_of_new_objects_work_after_savepoint(Persistent):
Persistent._p_invalidate(self)
print(self.settings) # POSKeyError here
def doctest_abort_of_savepoint_creating_new_objects_w_exotic_invalidate_doesnt_break():
def doctest_abort_of_savepoint_creating_new_objects_w_exotic_invalidate_doesnt_break(): # noqa: E501 line too long
r"""
Before, the following would fail with a POSKeyError, which was
somewhat surprising, in a very edgy sort of way. :)
......@@ -969,12 +978,14 @@ def doctest_abort_of_savepoint_creating_new_objects_w_exotic_invalidate_doesnt_b
"""
class Clp9460655(Persistent):
def __init__(self, word, id):
super(Clp9460655, self).__init__()
self.id = id
self._word = word
def doctest_lp9460655():
r"""
>>> conn = ZODB.connection(None)
......@@ -1001,6 +1012,7 @@ def doctest_lp9460655():
"""
def doctest_lp615758_transaction_abort_Incomplete_cleanup_for_new_objects():
r"""
......@@ -1022,12 +1034,14 @@ def doctest_lp615758_transaction_abort_Incomplete_cleanup_for_new_objects():
>>> c.close()
"""
class Clp485456_setattr_in_getstate_doesnt_cause_multiple_stores(Persistent):
def __getstate__(self):
self.got = 1
return self.__dict__.copy()
def doctest_lp485456_setattr_in_setstate_doesnt_cause_multiple_stores():
r"""
>>> C = Clp485456_setattr_in_getstate_doesnt_cause_multiple_stores
......@@ -1096,6 +1110,7 @@ class _PlayPersistent(Persistent):
def setValueWithSize(self, size=0): self.value = size*' '
__init__ = setValueWithSize
class EstimatedSizeTests(ZODB.tests.util.TestCase):
"""check that size estimations are handled correctly."""
......@@ -1134,7 +1149,7 @@ class EstimatedSizeTests(ZODB.tests.util.TestCase):
cache_size + new_size - size)
def test_size_set_on_load(self):
c = self.db.open() # new connection
c = self.db.open() # new connection
obj = c.root()['obj']
# the object is still a ghost and '_p_estimated_size' not yet set
# access to unghost
......@@ -1183,7 +1198,6 @@ class EstimatedSizeTests(ZODB.tests.util.TestCase):
)
self.assertEqual(db.getCacheSizeBytes(), 0x1 << 33)
def test_cache_garbage_collection(self):
db = self.db
# activate size based cache garbage collection
......@@ -1203,7 +1217,7 @@ class EstimatedSizeTests(ZODB.tests.util.TestCase):
db = self.db
# activate size based cache garbage collection
db.setCacheSizeBytes(1000)
obj, conn, cache = self.obj, self.conn, self.conn._cache
obj, cache = self.obj, self.conn._cache
# verify the change worked as expected
self.assertEqual(cache.cache_size_bytes, 1000)
# verify our entrance assumption is fulfilled
......@@ -1222,17 +1236,21 @@ class EstimatedSizeTests(ZODB.tests.util.TestCase):
# ---- stubs
class StubObject(Persistent):
pass
class ErrorOnGetstateException(Exception):
pass
class ErrorOnGetstateObject(Persistent):
def __getstate__(self):
raise ErrorOnGetstateException
class ModifyOnGetStateObject(Persistent):
def __init__(self, p):
......@@ -1346,7 +1364,7 @@ class TestConnection(unittest.TestCase):
db = ZODB.DB(None)
conn = db.open()
data = []
conn._storage.afterCompletion = lambda : data.append(None)
conn._storage.afterCompletion = lambda: data.append(None)
conn.transaction_manager.commit()
self.assertEqual(len(data), 1)
conn.close()
......@@ -1359,9 +1377,11 @@ class TestConnection(unittest.TestCase):
storage = MVCCMappingStorage()
new_instance = storage.new_instance
def new_instance2():
inst = new_instance()
sync = inst.sync
def sync2(*args):
sync()
syncs.append(1)
......@@ -1371,8 +1391,8 @@ class TestConnection(unittest.TestCase):
storage.new_instance = new_instance2
db = ZODB.DB(storage)
del syncs[:] # Need to do this to clear effect of getting the
# root object
del syncs[:] # Need to do this to clear effect of getting the
# root object
# We don't want to depend on latest transaction package, so
# just set attr for test:
......@@ -1404,6 +1424,7 @@ class TestConnection(unittest.TestCase):
db.close()
class StubDatabase(object):
def __init__(self):
......@@ -1418,7 +1439,8 @@ class StubDatabase(object):
def invalidate(self, transaction, dict_with_oid_keys, connection):
pass
large_record_size = 1<<30
large_record_size = 1 << 30
def test_suite():
s = unittest.makeSuite(ConnectionDotAdd)
......
......@@ -16,7 +16,6 @@ import persistent.mapping
import re
import transaction
import unittest
import ZODB.tests.util
from zope.testing import renormalizing
checker = renormalizing.RENormalizing([
......@@ -25,7 +24,8 @@ checker = renormalizing.RENormalizing([
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.ConnectionStateError"),
r"ConnectionStateError"),
])
])
def testAddingThenModifyThenAbort():
"""\
......@@ -53,6 +53,7 @@ savepoint.
>>> transaction.abort()
"""
def testModifyThenSavePointThenModifySomeMoreThenCommit():
"""\
We got conflict errors when we committed after we modified an object
......@@ -75,6 +76,7 @@ savepoint storage and *then* to commit the savepoint storage.
>>> transaction.commit()
"""
def testCantCloseConnectionWithActiveSavepoint():
"""
>>> import ZODB.tests.util
......@@ -91,6 +93,7 @@ def testCantCloseConnectionWithActiveSavepoint():
>>> db.close()
"""
def testSavepointDoesCacheGC():
"""\
Although the interface doesn't guarantee this internal detail, making a
......@@ -127,8 +130,8 @@ then, + 1 for the root object:
True
Making a savepoint at this time used to leave the cache holding the same
number of objects. Make sure the cache shrinks now instead. (Implementations that use
weak references, such as PyPy, may need a garbage collection.)
number of objects. Make sure the cache shrinks now instead. (Implementations
that use weak references, such as PyPy, may need a garbage collection.)
>>> dummy = transaction.savepoint()
>>> _ = gc.collect()
......@@ -149,6 +152,7 @@ Verify all the values are as expected:
>>> db.close()
"""
def testIsReadonly():
"""\
The connection isReadonly method relies on the _storage to have an isReadOnly method.
......@@ -164,12 +168,14 @@ We simply rely on the underlying storage method.
False
"""
class SelfActivatingObject(persistent.Persistent):
def _p_invalidate(self):
super(SelfActivatingObject, self)._p_invalidate()
self._p_activate()
def testInvalidateAfterRollback():
"""\
The rollback used to invalidate objects before resetting the TmpStore.
......@@ -196,13 +202,15 @@ the wrong state.
def tearDown(test):
transaction.abort()
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite(
'testConnectionSavepoint.txt',
tearDown=tearDown, checker=checker),
'testConnectionSavepoint.txt',
tearDown=tearDown, checker=checker),
doctest.DocTestSuite(tearDown=tearDown, checker=checker),
))
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
......@@ -15,9 +15,7 @@ from six import PY2
from ZODB.tests.MinPO import MinPO
import doctest
import os
import re
import sys
import time
import transaction
import unittest
......@@ -31,13 +29,14 @@ checker = renormalizing.RENormalizing([
r"\1"),
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.ReadConflictError"), r"ReadConflictError"),
])
])
# Return total number of connections across all pools in a db._pools.
def nconn(pools):
return sum([len(pool.all) for pool in pools.values()])
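A hedged sketch of what nconn() computes; FakePool is a made-up stand-in for the real connection pools, used only to show the bookkeeping:
>>> class FakePool(object):
...     def __init__(self, n):
...         self.all = [object() for _ in range(n)]
>>> nconn({'a': FakePool(2), 'b': FakePool(1)})
3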
class DBTests(ZODB.tests.util.TestCase):
def setUp(self):
......@@ -99,16 +98,16 @@ class DBTests(ZODB.tests.util.TestCase):
self.assertEqual(h[name], expect)
if PY2:
expect = unicode if text else str
expect = unicode if text else str # noqa: F821 undef name
for name in 'description', 'user_name':
self.assertTrue(isinstance(h[name], expect))
check(db.storage.history(z64, 3), False)
check(db.storage.undoLog(0, 3) , False)
check(db.storage.undoInfo(0, 3) , False)
check(db.storage.undoLog(0, 3), False)
check(db.storage.undoInfo(0, 3), False)
check(db.history(z64, 3), True)
check(db.undoLog(0, 3) , True)
check(db.undoInfo(0, 3) , True)
check(db.undoLog(0, 3), True)
check(db.undoInfo(0, 3), True)
class TransactionalUndoTests(unittest.TestCase):
......@@ -266,6 +265,7 @@ def test_invalidateCache():
>>> db.close()
"""
def connectionDebugInfo():
r"""DB.connectionDebugInfo provides information about connections.
......@@ -310,11 +310,13 @@ def connectionDebugInfo():
"""
def passing_a_file_name_to_DB():
"""You can pass a file-storage file name to DB.
(Also note that we can access DB in ZODB.)
>>> import os
>>> db = ZODB.DB('data.fs')
>>> db.storage # doctest: +ELLIPSIS
<ZODB.FileStorage.FileStorage.FileStorage object at ...
......@@ -324,6 +326,7 @@ def passing_a_file_name_to_DB():
>>> db.close()
"""
def passing_None_to_DB():
"""You can pass None DB to get a MappingStorage.
......@@ -335,6 +338,7 @@ def passing_None_to_DB():
>>> db.close()
"""
def open_convenience():
"""Often, we just want to open a single connection.
......@@ -372,6 +376,7 @@ def open_convenience():
"""
def db_with_transaction():
"""Using databases with with
......@@ -405,7 +410,7 @@ Let's try again, but this time, we'll have an exception:
>>> with db.transaction() as conn2:
... conn2.root()['y'] = 2
... XXX #doctest: +IGNORE_EXCEPTION_DETAIL
... XXX # noqa: F821 undefined name
Traceback (most recent call last):
...
NameError: name 'XXX' is not defined
......@@ -429,6 +434,7 @@ Let's try again, but this time, we'll have an exception:
>>> db.close()
"""
def connection_allows_empty_version_for_idiots():
r"""
>>> db = ZODB.DB('t.fs')
......@@ -440,6 +446,7 @@ def connection_allows_empty_version_for_idiots():
>>> db.close()
"""
def warn_when_data_records_are_big():
"""
When data records are large, a warning is issued to try to prevent new
......@@ -486,7 +493,8 @@ We can also specify it using a configuration option:
... "object you're saving is large.")
>>> db.close()
""" # '
""" # '
def minimally_test_connection_timeout():
"""There's a mechanism to discard old connections.
......@@ -508,6 +516,7 @@ def minimally_test_connection_timeout():
"""
def cleanup_on_close():
"""Verify that various references are cleared on close
......@@ -533,10 +542,11 @@ def cleanup_on_close():
[]
"""
def test_suite():
s = unittest.defaultTestLoader.loadTestsFromName(__name__)
s.addTest(doctest.DocTestSuite(
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
checker=checker
))
checker=checker, optionflags=doctest.IGNORE_EXCEPTION_DETAIL
))
return s
......@@ -22,7 +22,7 @@ from ZODB.tests import (
RevisionStorage,
StorageTestBase,
Synchronization,
)
)
import os
if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
......@@ -30,7 +30,6 @@ if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
else:
import doctest
import random
import re
import transaction
import unittest
import ZODB.DemoStorage
......@@ -40,7 +39,6 @@ import ZODB.utils
from ZODB.utils import load_current
from zope.testing import renormalizing
class DemoStorageTests(
StorageTestBase.StorageTestBase,
......@@ -53,7 +51,7 @@ class DemoStorageTests(
PackableStorage.PackableStorage,
RevisionStorage.RevisionStorage,
Synchronization.SynchronizedStorage,
):
):
def setUp(self):
StorageTestBase.StorageTestBase.setUp(self)
......@@ -67,7 +65,7 @@ class DemoStorageTests(
def checkLoadDelegation(self):
# Minimal test of loadEX w/o version -- ironically
db = DB(self._storage) # creates object 0. :)
DB(self._storage) # creates object 0. :)
s2 = ZODB.DemoStorage.DemoStorage(base=self._storage)
self.assertEqual(load_current(s2, ZODB.utils.z64),
load_current(self._storage, ZODB.utils.z64))
......@@ -75,7 +73,7 @@ class DemoStorageTests(
def checkLengthAndBool(self):
self.assertEqual(len(self._storage), 0)
self.assertTrue(not self._storage)
db = DB(self._storage) # creates object 0. :)
db = DB(self._storage) # creates object 0. :)
self.assertEqual(len(self._storage), 1)
self.assertTrue(self._storage)
with db.transaction() as conn:
......@@ -86,7 +84,7 @@ class DemoStorageTests(
db.close()
def checkLoadBeforeUndo(self):
pass # we don't support undo yet
pass # we don't support undo yet
checkUndoZombie = checkLoadBeforeUndo
def checkBaseHistory(self):
......@@ -97,6 +95,7 @@ class DemoStorageTests(
self._storage = self._storage.push()
self._checkHistory(base_only())
self._storage = self._storage.pop()
def base_and_changes():
yield 11
yield 12
......@@ -106,6 +105,7 @@ class DemoStorageTests(
self._checkHistory(base_and_changes())
self._storage = self._storage.pop()
class DemoStorageHexTests(DemoStorageTests):
def setUp(self):
......@@ -113,6 +113,7 @@ class DemoStorageHexTests(DemoStorageTests):
self._storage = ZODB.tests.hexstorage.HexStorage(
ZODB.DemoStorage.DemoStorage())
class DemoStorageWrappedBase(DemoStorageTests):
def setUp(self):
......@@ -128,24 +129,27 @@ class DemoStorageWrappedBase(DemoStorageTests):
raise NotImplementedError
def checkPackOnlyOneObject(self):
pass # Wrapping demo storages don't do gc
pass # Wrapping demo storages don't do gc
def checkPackWithMultiDatabaseReferences(self):
pass # we never do gc
pass # we never do gc
checkPackAllRevisions = checkPackWithMultiDatabaseReferences
class DemoStorageWrappedAroundMappingStorage(DemoStorageWrappedBase):
def _makeBaseStorage(self):
from ZODB.MappingStorage import MappingStorage
return MappingStorage()
class DemoStorageWrappedAroundFileStorage(DemoStorageWrappedBase):
def _makeBaseStorage(self):
from ZODB.FileStorage import FileStorage
return FileStorage('FileStorageTests.fs')
class DemoStorageWrappedAroundHexMappingStorage(DemoStorageWrappedBase):
def _makeBaseStorage(self):
......@@ -157,6 +161,7 @@ def setUp(test):
random.seed(0)
ZODB.tests.util.setUp(test)
def testSomeDelegation():
r"""
>>> import six
......@@ -194,11 +199,9 @@ def testSomeDelegation():
>>> storage.tpc_begin(1, 2, 3)
begin 2 3
>>> storage.tpc_abort(1)
>>>
"""
def blob_pos_key_error_with_non_blob_base():
"""
>>> storage = ZODB.DemoStorage.DemoStorage()
......@@ -214,6 +217,7 @@ def blob_pos_key_error_with_non_blob_base():
"""
def load_before_base_storage_current():
"""
Here we'll exercise that DemoStorage's loadBefore method works
......@@ -221,7 +225,6 @@ def load_before_base_storage_current():
base storage.
>>> import time
>>> import transaction
>>> import ZODB.DB
>>> import ZODB.DemoStorage
>>> import ZODB.MappingStorage
......@@ -264,19 +267,20 @@ def load_before_base_storage_current():
>>> base.close()
"""
def test_suite():
suite = unittest.TestSuite((
doctest.DocTestSuite(
setUp=setUp, tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker
),
),
doctest.DocFileSuite(
'../DemoStorage.test',
setUp=setUp,
tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker,
),
))
),
))
suite.addTest(unittest.makeSuite(DemoStorageTests, 'check'))
suite.addTest(unittest.makeSuite(DemoStorageHexTests, 'check'))
suite.addTest(unittest.makeSuite(DemoStorageWrappedAroundFileStorage,
......
......@@ -11,10 +11,12 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import doctest
import os
if os.environ.get('USE_ZOPE_TESTING_DOCTEST'):
from zope.testing import doctest
else:
import doctest
import sys
import unittest
import transaction
......@@ -38,6 +40,7 @@ from ZODB._compat import dump, dumps, _protocol
from . import util
class FileStorageTests(
StorageTestBase.StorageTestBase,
BasicStorage.BasicStorage,
......@@ -54,7 +57,7 @@ class FileStorageTests(
PersistentStorage.PersistentStorage,
MTStorage.MTStorage,
ReadOnlyStorage.ReadOnlyStorage
):
):
use_extension_bytes = True
......@@ -196,9 +199,9 @@ class FileStorageTests(
giant_oid = b'\xee' * 8
# Store an object.
# oid, serial, data, version, transaction
r1 = self._storage.store(giant_oid, b'\0'*8, b'data', b'', t)
self._storage.store(giant_oid, b'\0'*8, b'data', b'', t)
# Finish the transaction.
r2 = self._storage.tpc_vote(t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# Before ZODB 3.2.6, this failed, with ._oid == z64.
self.assertEqual(self._storage._oid, giant_oid)
......@@ -213,9 +216,9 @@ class FileStorageTests(
giant_oid = b'\xee' * 8
# Store an object.
# oid, serial, data, version, prev_txn, transaction
r1 = self._storage.restore(giant_oid, b'\0'*8, b'data', b'', None, t)
self._storage.restore(giant_oid, b'\0'*8, b'data', b'', None, t)
# Finish the transaction.
r2 = self._storage.tpc_vote(t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# Before ZODB 3.2.6, this failed, with ._oid == z64.
self.assertEqual(self._storage._oid, giant_oid)
......@@ -249,7 +252,7 @@ class FileStorageTests(
pos2 = f.tell() - 8
f.seek(pos2)
tlen2 = U64(f.read(8)) # length-8 of the last transaction
pos1 = pos2 - tlen2 + 8 # skip over the tid at the start
pos1 = pos2 - tlen2 + 8 # skip over the tid at the start
f.seek(pos1)
tlen1 = U64(f.read(8)) # should be redundant length-8
self.assertEqual(tlen1, tlen2) # verify that it is redundant
......@@ -264,7 +267,7 @@ class FileStorageTests(
self._storage.pack(time.time(), referencesf)
except CorruptedError as detail:
self.assertTrue("redundant transaction length does not match "
"initial transaction length" in str(detail))
"initial transaction length" in str(detail))
else:
self.fail("expected CorruptedError")
......@@ -344,9 +347,10 @@ class FileStorageTests(
head = stor.tpc_finish(t)
self.assertEqual(head, stor.lastTransaction())
v = list( stor.iterator(start=head, stop=head) )
v = list(stor.iterator(start=head, stop=head))
self.assertEqual(len(v), 1)
trec = v[0] # FileStorage.TransactionRecord or hexstorage.Transaction
# FileStorage.TransactionRecord or hexstorage.Transaction
trec = v[0]
self.assertEqual(trec.tid, head)
self.assertEqual(trec.user, b'')
self.assertEqual(trec.description, description.encode('utf-8'))
......@@ -359,7 +363,7 @@ class FileStorageHexTests(FileStorageTests):
def open(self, **kwargs):
self._storage = ZODB.tests.hexstorage.HexStorage(
ZODB.FileStorage.FileStorage('FileStorageTests.fs',**kwargs))
ZODB.FileStorage.FileStorage('FileStorageTests.fs', **kwargs))
class FileStorageTestsWithBlobsEnabled(FileStorageTests):
......@@ -384,7 +388,7 @@ class FileStorageHexTestsWithBlobsEnabled(FileStorageTests):
class FileStorageRecoveryTest(
StorageTestBase.StorageTestBase,
RecoveryStorage.RecoveryStorage,
):
):
def setUp(self):
StorageTestBase.StorageTestBase.setUp(self)
......@@ -398,6 +402,7 @@ class FileStorageRecoveryTest(
def new_dest(self):
return ZODB.FileStorage.FileStorage('Dest.fs')
class FileStorageHexRecoveryTest(FileStorageRecoveryTest):
def setUp(self):
......@@ -454,6 +459,7 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
module.Broken = Broken
oids = [[self._storage.new_oid(), None] for i in range(3)]
def store(i, data):
oid, revid = oids[i]
self._storage.store(oid, revid, data, "", t)
......@@ -495,6 +501,8 @@ class AnalyzeDotPyTest(StorageTestBase.StorageTestBase):
# Raise an exception if the tids in FileStorage fs aren't
# strictly increasing.
def checkIncreasingTids(fs):
lasttid = b'\0' * 8
for txn in fs.iterator():
......@@ -503,6 +511,8 @@ def checkIncreasingTids(fs):
lasttid = txn.tid
# Return a TimeStamp object 'minutes' minutes in the future.
def timestamp(minutes):
import time
from persistent.TimeStamp import TimeStamp
......@@ -510,6 +520,7 @@ def timestamp(minutes):
t = time.time() + 60 * minutes
return TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
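A hedged usage sketch of the helper above (the minute offsets are arbitrary): later TimeStamps sort higher, both as objects and as raw 8-byte tids, which is what testTimeTravelOnOpen() below relies on when it fabricates storages whose last transaction id lies in the future:
>>> t10 = timestamp(10)
>>> t20 = timestamp(20)
>>> t20.raw() > t10.raw()
True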
def testTimeTravelOnOpen():
"""
>>> from ZODB.FileStorage import FileStorage
......@@ -586,6 +597,7 @@ def testTimeTravelOnOpen():
>>> handler.uninstall()
"""
def lastInvalidations():
"""
......@@ -636,6 +648,7 @@ Of course, calling lastInvalidations on an empty storage refturns no data:
>>> fs.close()
"""
def deal_with_finish_failures():
r"""
......@@ -690,6 +703,7 @@ def deal_with_finish_failures():
>>> db.close()
"""
def pack_with_open_blob_files():
"""
Make sure packing works while there are open blob files.
......@@ -726,6 +740,7 @@ def pack_with_open_blob_files():
>>> db.close()
"""
def readonly_open_nonexistent_file():
"""
Make sure an error is reported when trying to open a non-existent file
......@@ -739,6 +754,7 @@ def readonly_open_nonexistent_file():
error: ... No such file or directory: 'nonexistent.fs'
"""
def test_suite():
suite = unittest.TestSuite()
for klass in [
......@@ -748,7 +764,7 @@ def test_suite():
FileStorageNoRestoreRecoveryTest,
FileStorageTestsWithBlobsEnabled, FileStorageHexTestsWithBlobsEnabled,
AnalyzeDotPyTest,
]:
]:
suite.addTest(unittest.makeSuite(klass, "check"))
suite.addTest(doctest.DocTestSuite(
setUp=zope.testing.setupstack.setUpDirectory,
......@@ -760,7 +776,7 @@ def test_suite():
ZODB.FileStorage.FileStorage('%s.fs' % name, blob_dir=blob_dir),
test_blob_storage_recovery=True,
test_packing=True,
))
))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'BlobFileHexStorage',
lambda name, blob_dir:
......@@ -768,12 +784,9 @@ def test_suite():
ZODB.FileStorage.FileStorage('%s.fs' % name, blob_dir=blob_dir)),
test_blob_storage_recovery=True,
test_packing=True,
))
))
suite.addTest(PackableStorage.IExternalGC_suite(
lambda : ZODB.FileStorage.FileStorage(
lambda: ZODB.FileStorage.FileStorage(
'data.fs', blob_dir='blobs', pack_gc=False)))
suite.layer = util.MininalTestLayer('testFileStorage')
return suite
if __name__=='__main__':
unittest.main()
......@@ -31,7 +31,8 @@ from ZODB.tests import (
RevisionStorage,
StorageTestBase,
Synchronization,
)
)
class MVCCTests(object):
......@@ -146,7 +147,7 @@ class MVCCMappingStorageTests(
RevisionStorage.RevisionStorage,
Synchronization.SynchronizedStorage,
MVCCTests
):
):
def setUp(self):
self._storage = MVCCMappingStorage()
......@@ -155,13 +156,10 @@ class MVCCMappingStorageTests(
self._storage.close()
def checkLoadBeforeUndo(self):
pass # we don't support undo yet
pass # we don't support undo yet
checkUndoZombie = checkLoadBeforeUndo
def checkTransactionIdIncreases(self):
import time
from ZODB.utils import newTid
from ZODB.TimeStamp import TimeStamp
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.tpc_vote(t)
......@@ -178,10 +176,12 @@ class MVCCMappingStorageTests(
self._storage.tpc_begin(t)
self.assertEqual(self._storage._tid, b'zzzzzzzz')
def create_blob_storage(name, blob_dir):
s = MVCCMappingStorage(name)
return ZODB.blob.BlobStorage(blob_dir, s)
def test_suite():
suite = unittest.makeSuite(MVCCMappingStorageTests, 'check')
# Note: test_packing doesn't work because even though MVCCMappingStorage
......@@ -191,10 +191,5 @@ def test_suite():
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'MVCCMapping', create_blob_storage,
test_undo=False,
))
))
return suite
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
......@@ -26,7 +26,8 @@ from ZODB.tests import (
RevisionStorage,
StorageTestBase,
Synchronization,
)
)
class MappingStorageTests(
StorageTestBase.StorageTestBase,
......@@ -39,7 +40,7 @@ class MappingStorageTests(
PackableStorage.PackableStorageWithOptionalGC,
RevisionStorage.RevisionStorage,
Synchronization.SynchronizedStorage,
):
):
def setUp(self):
StorageTestBase.StorageTestBase.setUp(self, )
......@@ -52,9 +53,10 @@ class MappingStorageTests(
pass
def checkLoadBeforeUndo(self):
pass # we don't support undo yet
pass # we don't support undo yet
checkUndoZombie = checkLoadBeforeUndo
class MappingStorageHexTests(MappingStorageTests):
def setUp(self):
......@@ -62,11 +64,13 @@ class MappingStorageHexTests(MappingStorageTests):
self._storage = ZODB.tests.hexstorage.HexStorage(
ZODB.MappingStorage.MappingStorage())
MockTransaction = namedtuple(
'transaction',
['user', 'description', 'extension']
)
class MappingStorageTransactionRecordTests(unittest.TestCase):
def setUp(self):
......@@ -86,14 +90,11 @@ class MappingStorageTransactionRecordTests(unittest.TestCase):
self._transaction_record._extension
)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MappingStorageTests, 'check'))
suite.addTest(unittest.makeSuite(MappingStorageHexTests, 'check'))
suite.addTest(unittest.makeSuite(MappingStorageTransactionRecordTests, 'check'))
suite.addTest(unittest.makeSuite(
MappingStorageTransactionRecordTests, 'check'))
return suite
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
......@@ -23,6 +23,7 @@ l0 = []
l1 = [0]
l2 = [0, 1]
class TestPList(unittest.TestCase):
def checkTheWorld(self):
# Test constructors
......@@ -37,11 +38,14 @@ class TestPList(unittest.TestCase):
uu2 = PersistentList(u2)
v = PersistentList(tuple(u))
class OtherList(object):
def __init__(self, initlist):
self.__data = initlist
def __len__(self):
return len(self.__data)
def __getitem__(self, i):
return self.__data[i]
v0 = PersistentList(OtherList(u0))
......@@ -59,16 +63,18 @@ class TestPList(unittest.TestCase):
# Py3: No cmp() or __cmp__ anymore.
if PY2:
def mycmp(a, b):
r = cmp(a, b)
if r < 0: return -1
if r > 0: return 1
r = cmp(a, b) # noqa: F821 undefined name 'cmp'
if r < 0:
return -1
if r > 0:
return 1
return r
all = [l0, l1, l2, u, u0, u1, u2, uu, uu0, uu1, uu2]
all = [l0, l1, l2, u, u0, u1, u2, v, v0, vv, uu, uu0, uu1, uu2]
for a in all:
for b in all:
eq(mycmp(a, b), mycmp(len(a), len(b)),
"mycmp(a, b) == mycmp(len(a), len(b))")
"mycmp(a, b) == mycmp(len(a), len(b))")
# Test __getitem__
......@@ -142,9 +148,9 @@ class TestPList(unittest.TestCase):
# Test __add__, __radd__, __mul__ and __rmul__
#self.assertTrue(u1 + [] == [] + u1 == u1, "u1 + [] == [] + u1 == u1")
# self.assertTrue(u1 + [] == [] + u1 == u1, "u1 + [] == [] + u1 == u1")
self.assertTrue(u1 + [1] == u2, "u1 + [1] == u2")
#self.assertTrue([-1] + u1 == [-1, 0], "[-1] + u1 == [-1, 0]")
# self.assertTrue([-1] + u1 == [-1, 0], "[-1] + u1 == [-1, 0]")
self.assertTrue(u2 == u2*1 == 1*u2, "u2 == u2*1 == 1*u2")
self.assertTrue(u2+u2 == u2*2 == 2*u2, "u2+u2 == u2*2 == 2*u2")
self.assertTrue(u2+u2+u2 == u2*3 == 3*u2, "u2+u2+u2 == u2*3 == 3*u2")
......@@ -181,7 +187,6 @@ class TestPList(unittest.TestCase):
eq(u.count(1), 3, "u.count(1) == 3")
eq(u.count(2), 0, "u.count(2) == 0")
# Test index
eq(u2.index(0), 0, "u2.index(0) == 0")
......@@ -218,10 +223,6 @@ class TestPList(unittest.TestCase):
from ZODB.PersistentList import PersistentList as oldPath
self.assertTrue(oldPath is PersistentList)
def test_suite():
return unittest.makeSuite(TestPList, 'check')
if __name__ == "__main__":
loader = unittest.TestLoader()
loader.testMethodPrefix = "check"
unittest.main(testLoader=loader)
......@@ -34,6 +34,7 @@ from six import PY2
pickle = ('((U\x0bPersistenceq\x01U\x11PersistentMappingtq\x02Nt.}q\x03U\n'
'_containerq\x04}q\x05U\x07versionq\x06U\x03oldq\x07ss.\n')
class PMTests(unittest.TestCase):
def checkOldStyleRoot(self):
......@@ -41,7 +42,7 @@ class PMTests(unittest.TestCase):
# is, but the global `pickle` references it explicitly. So just
# bail if Persistence isn't available.
try:
import Persistence
import Persistence # noqa: F401 'Persistence' imported but unused
except ImportError:
return
# insert the pickle in place of the root
......@@ -129,6 +130,7 @@ class PMTests(unittest.TestCase):
keylist.append(key)
check(keylist)
def find_global(modulename, classname):
"""Helper for this test suite to get special PersistentMapping"""
......@@ -142,8 +144,6 @@ def find_global(modulename, classname):
mod = sys.modules[modulename]
return getattr(mod, classname)
def test_suite():
return unittest.makeSuite(PMTests, 'check')
if __name__ == "__main__":
unittest.main()
......@@ -16,7 +16,6 @@
__docformat__ = "reStructuredText"
def test_weakrefs_functional():
"""Persistent weak references
......@@ -29,7 +28,7 @@ def test_weakrefs_functional():
>>> import transaction
>>> from persistent.wref import WeakRef
>>> import persistent, ZODB.tests.MinPO
>>> import ZODB.tests.MinPO
>>> import ZODB.tests.util
>>> ob = ZODB.tests.MinPO.MinPO()
>>> ref = WeakRef(ob)
......@@ -256,6 +255,7 @@ def test_PersistentWeakKeyDictionary():
"""
def test_PersistentWeakKeyDictionary_get():
"""
>>> import ZODB.tests.util
......@@ -270,6 +270,7 @@ def test_PersistentWeakKeyDictionary_get():
12
"""
def test_suite():
from doctest import DocTestSuite
return DocTestSuite()
......@@ -33,6 +33,7 @@ except ImportError:
# Py3
import io as StringIO
class RecoverTest(ZODB.tests.util.TestCase):
path = None
......@@ -101,8 +102,9 @@ class RecoverTest(ZODB.tests.util.TestCase):
try:
sys.stdout = faux_stdout
try:
ZODB.fsrecover.recover(self.path, self.dest,
verbose=0, partial=True, force=False, pack=1)
ZODB.fsrecover.recover(
self.path, self.dest, verbose=0, partial=True, force=False,
pack=1)
except SystemExit:
raise RuntimeError("recover tried to exit")
finally:
......
......@@ -18,15 +18,16 @@ import unittest
from persistent import Persistent
from persistent.wref import WeakRef
import zope.testing.setupstack
import ZODB.tests.util
from ZODB import serialize
from ZODB._compat import Pickler, PersistentUnpickler, BytesIO, _protocol, IS_JYTHON
from ZODB._compat import Pickler, PersistentUnpickler, BytesIO, _protocol
from ZODB._compat import IS_JYTHON
class PersistentObject(Persistent):
pass
class ClassWithNewargs(int):
def __new__(cls, value):
return int.__new__(cls, value)
......@@ -34,10 +35,12 @@ class ClassWithNewargs(int):
def __getnewargs__(self):
return int(self),
class ClassWithoutNewargs(object):
def __init__(self, value):
self.value = value
def make_pickle(ob):
sio = BytesIO()
p = Pickler(sio, _protocol)
......@@ -48,6 +51,7 @@ def make_pickle(ob):
def _factory(conn, module_name, name):
return globals()[name]
class SerializerTestCase(unittest.TestCase):
# old format: (module, name), None
......@@ -104,6 +108,7 @@ class SerializerTestCase(unittest.TestCase):
class OldStyle(object):
bar = "bar"
def __getattr__(self, name):
if name == "error":
raise ValueError("whee!")
......@@ -112,6 +117,7 @@ class SerializerTestCase(unittest.TestCase):
class NewStyle(object):
bar = "bar"
def _raise(self):
raise ValueError("whee!")
error = property(_raise)
......@@ -150,7 +156,7 @@ class SerializerTestCase(unittest.TestCase):
def test_protocol_3_binary_handling(self):
from ZODB.serialize import _protocol
self.assertEqual(3, _protocol) # Yeah, whitebox
self.assertEqual(3, _protocol) # Yeah, whitebox
o = PersistentObject()
o._p_oid = b'o'
o.o = PersistentObject()
......@@ -161,6 +167,7 @@ class SerializerTestCase(unittest.TestCase):
# SHORT_BINBYTES opcode:
self.assertTrue(b'C\x03o.o' in pickle)
class SerializerFunctestCase(unittest.TestCase):
def setUp(self):
......@@ -188,7 +195,9 @@ class SerializerFunctestCase(unittest.TestCase):
# it can't import '_jythonlib' and the whole process fails
# We would use multiprocessing here, but it doesn't exist on jython
sys_path = [x for x in sys.path
if not x.endswith('Lib') and x != '__classpath__' and x!= '__pyclasspath__/']
if not x.endswith('Lib')
and x != '__classpath__'
and x != '__pyclasspath__/']
else:
sys_path = sys.path
environ['PYTHONPATH'] = os.pathsep.join(sys_path)
......@@ -198,6 +207,7 @@ class SerializerFunctestCase(unittest.TestCase):
'_functest_load(%s)' % repr(fqn)]
subprocess.call(load_args, env=environ)
def _working_failing_datetimes():
import datetime
WORKING = datetime.datetime(5375, 12, 31, 23, 59, 59)
......@@ -205,6 +215,7 @@ def _working_failing_datetimes():
FAILING = datetime.datetime(5376, 12, 31, 23, 59, 59)
return WORKING, FAILING
def _functest_prep(fqn):
# Prepare the database with a BTree which won't deserialize
# if the bug is present.
......@@ -221,10 +232,11 @@ def _functest_prep(fqn):
tree[WORKING] = 'working'
tree[FAILING] = 'failing'
transaction.commit()
finally: # Windoze
finally: # Windoze
conn.close()
db.close()
def _functest_load(fqn):
# Open the database and attempt to deserialize the tree
# (run in separate process)
......@@ -237,10 +249,11 @@ def _functest_load(fqn):
tree = root['tree']
assert tree[WORKING] == 'working'
assert tree[FAILING] == 'failing'
finally: # Windoze
finally: # Windoze
conn.close()
db.close()
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(SerializerTestCase),
......
......@@ -27,17 +27,18 @@ NUM = 100
checker = renormalizing.RENormalizing([
# Python 3 bytes add a "b".
(re.compile("b('.*?')"), r"\1"),
# Windows shows result from 'u64' as long?
(re.compile(r"(\d+)L"), r"\1"),
])
# Python 3 bytes add a "b".
(re.compile("b('.*?')"), r"\1"),
# Windows shows result from 'u64' as long?
(re.compile(r"(\d+)L"), r"\1"),
])
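In case the intent of the checker is unclear: RENormalizing applies these substitutions to both the expected and the actual doctest output before comparing, so byte-literal and long-integer reprs collapse to a common form. A small hedged illustration of the two patterns above:
>>> import re
>>> pats = [(re.compile("b('.*?')"), r"\1"), (re.compile(r"(\d+)L"), r"\1")]
>>> s = "b'abc' 42L"
>>> for pat, repl in pats:
...     s = pat.sub(repl, s)
>>> s
"'abc' 42"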
class TestUtils(unittest.TestCase):
small = [random.randrange(1, 1<<32)
small = [random.randrange(1, 1 << 32)
for i in range(NUM)]
large = [random.randrange(1<<32, 1<<64)
large = [random.randrange(1 << 32, 1 << 64)
for i in range(NUM)]
all = small + large
......@@ -51,14 +52,15 @@ class TestUtils(unittest.TestCase):
def test_KnownConstants(self):
self.assertEqual(b"\000\000\000\000\000\000\000\001", p64(1))
self.assertEqual(b"\000\000\000\001\000\000\000\000", p64(1<<32))
self.assertEqual(b"\000\000\000\001\000\000\000\000", p64(1 << 32))
self.assertEqual(u64(b"\000\000\000\000\000\000\000\001"), 1)
self.assertEqual(U64(b"\000\000\000\000\000\000\000\001"), 1)
self.assertEqual(u64(b"\000\000\000\001\000\000\000\000"), 1<<32)
self.assertEqual(U64(b"\000\000\000\001\000\000\000\000"), 1<<32)
self.assertEqual(u64(b"\000\000\000\001\000\000\000\000"), 1 << 32)
self.assertEqual(U64(b"\000\000\000\001\000\000\000\000"), 1 << 32)
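For readers unfamiliar with these helpers: p64 and u64 pack and unpack 64-bit integers as 8-byte big-endian strings. An equivalent sketch in terms of the standard library (describing the observable behaviour, not the actual ZODB.utils implementation):
>>> import struct
>>> p64(1 << 32) == struct.pack(">Q", 1 << 32)
True
>>> u64(struct.pack(">Q", 12345)) == 12345
True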
def test_PersistentIdHandlesDescriptor(self):
from ZODB.serialize import ObjectWriter
class P(Persistent):
pass
......@@ -126,7 +128,7 @@ class TestUtils(unittest.TestCase):
if HIGHEST_PROTOCOL >= 3:
pickle = dumps(ExampleClass, protocol=3)
self.assertEqual(get_pickle_metadata(pickle),
(__name__, ExampleClass.__name__))
(__name__, ExampleClass.__name__))
def test_p64_bad_object(self):
with self.assertRaises(ValueError) as exc:
......@@ -149,10 +151,10 @@ class TestUtils(unittest.TestCase):
self.assertEqual(e.args[-1], b'123456789')
class ExampleClass(object):
pass
def test_suite():
suite = unittest.defaultTestLoader.loadTestsFromName(__name__)
suite.addTest(
......
......@@ -15,7 +15,6 @@ from persistent import Persistent
from persistent.mapping import PersistentMapping
from ZODB.POSException import TransactionFailedError
import doctest
from BTrees.OOBTree import OOBTree
import transaction
import unittest
......@@ -24,6 +23,7 @@ import ZODB.FileStorage
import ZODB.MappingStorage
import ZODB.tests.util
class P(Persistent):
pass
......@@ -83,7 +83,7 @@ class ZODBTests(ZODB.tests.util.TestCase):
transaction.abort()
else:
transaction.commit()
except:
except: # noqa: E722 do not use bare 'except'
transaction.abort()
raise
......@@ -290,7 +290,7 @@ class ZODBTests(ZODB.tests.util.TestCase):
# Arrange for commit to fail during tpc_vote.
poisoned_jar = PoisonedJar(break_tpc_vote=True)
poisoned = PoisonedObject(poisoned_jar)
PoisonedObject(poisoned_jar)
transaction.get().join(poisoned_jar)
self.assertRaises(PoisonedError, transaction.get().commit)
......@@ -444,10 +444,13 @@ class ZODBTests(ZODB.tests.util.TestCase):
transaction.abort()
conn.close()
class PoisonedError(Exception):
pass
# PoisonedJar arranges to raise PoisonedError from interesting places.
class PoisonedJar(object):
def __init__(self, break_tpc_begin=False, break_tpc_vote=False,
break_savepoint=False):
......@@ -483,10 +486,8 @@ class PoisonedObject(object):
def __init__(self, poisonedjar):
self._p_jar = poisonedjar
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ZODBTests, 'check'),
))
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
))
......@@ -17,6 +17,7 @@ import warnings
from .._compat import dumps, loads
from ..Connection import TransactionMetaData
class TransactionMetaDataTests(unittest.TestCase):
def test_basic(self):
......@@ -118,8 +119,6 @@ class TransactionMetaDataTests(unittest.TestCase):
t.data(data)
self.assertEqual(c.exception.args, (data,))
def test_suite():
return unittest.makeSuite(TransactionMetaDataTests)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
......@@ -17,6 +17,7 @@ from ZODB.config import databaseFromString
import transaction
import doctest
class RecalcitrantObject(Persistent):
"""A Persistent object that will not become a ghost."""
......@@ -30,6 +31,7 @@ class RecalcitrantObject(Persistent):
init = classmethod(init)
class RegularObject(Persistent):
deactivations = 0
......@@ -49,9 +51,11 @@ class RegularObject(Persistent):
init = classmethod(init)
class PersistentObject(Persistent):
pass
class CacheTests(object):
def test_cache(self):
......@@ -208,6 +212,7 @@ class CacheTests(object):
>>> RegularObject.deactivations
4
"""
def test_gc_on_open_connections(self):
r"""Test that automatic GC is not applied to open connections.
......
......@@ -15,7 +15,7 @@ import doctest
import unittest
__test__ = dict(
cross_db_refs_to_blank_db_name = """
cross_db_refs_to_blank_db_name="""
There was a bug that caused bad refs to be generated if a database
name was blank.
......@@ -41,7 +41,7 @@ __test__ = dict(
>>> db2.close()
>>> db1.close()
""",
)
)
def test_suite():
......
......@@ -65,7 +65,7 @@ Now we see two transactions and two changed objects.
Clean up.
>>> db.close()
"""
""" # noqa: E501 line too long
import re
import doctest
......
......@@ -5,12 +5,14 @@ import ZODB
from .MVCCMappingStorage import MVCCMappingStorage
class PrefetchTests(unittest.TestCase):
def test_prefetch(self):
db = ZODB.DB(None)
fetched = []
def prefetch(oids, tid):
fetched.append((list(map(u64, oids)), tid))
......
......@@ -29,6 +29,7 @@ from ZODB.tests import StorageTestBase
from ZODB.tests import BasicStorage, MTStorage, Synchronization
from ZODB.tests import RevisionStorage
class Transaction(object):
"""Hold data for current transaction for MinimalMemoryStorage."""
......@@ -42,6 +43,7 @@ class Transaction(object):
def cur(self):
return dict.fromkeys([oid for oid, tid in self.index.keys()], self.tid)
class MinimalMemoryStorage(BaseStorage, object):
"""Simple in-memory storage that supports revisions.
......@@ -134,6 +136,7 @@ class MinimalMemoryStorage(BaseStorage, object):
cleanup = close
class MinimalTestSuite(StorageTestBase.StorageTestBase,
BasicStorage.BasicStorage,
MTStorage.MTStorage,
......@@ -150,5 +153,6 @@ class MinimalTestSuite(StorageTestBase.StorageTestBase,
def checkLoadBeforeUndo(self):
pass
def test_suite():
return unittest.makeSuite(MinimalTestSuite, "check")
......@@ -47,6 +47,7 @@ except NameError:
from . import util
def new_time():
"""Create a _new_ time stamp.
......@@ -144,19 +145,19 @@ class BushyLayoutTests(ZODB.tests.util.TestCase):
non_ascii_oid = b'>\xf1<0\xe9Q\x99\xf0'
# The argument should already be bytes;
# os.path.sep is native string type under both 2 and 3
# binascii.hexlify takes bytes and produces bytes under both py2 and py3
# the result should be the native string type
# binascii.hexlify takes bytes and produces bytes under both py2 and
# py3 the result should be the native string type
oid_as_path = BushyLayout().oid_to_path(non_ascii_oid)
self.assertEqual(
oid_as_path,
os.path.sep.join(
'0x3e/0xf1/0x3c/0x30/0xe9/0x51/0x99/0xf0'.split('/')))
'0x3e/0xf1/0x3c/0x30/0xe9/0x51/0x99/0xf0'.split('/')))
# the reverse holds true as well
path_as_oid = BushyLayout().path_to_oid(oid_as_path)
self.assertEqual(
path_as_oid,
non_ascii_oid )
non_ascii_oid)
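A small sketch of the mapping exercised above (illustrative only, not the BushyLayout implementation itself): each of the oid's eight bytes becomes one hex-named directory level, and joining them with os.path.sep reproduces the expected path:
>>> import binascii, os
>>> oid = b'>\xf1<0\xe9Q\x99\xf0'
>>> parts = ['0x' + binascii.hexlify(oid[i:i + 1]).decode('ascii')
...          for i in range(len(oid))]
>>> os.path.sep.join(parts) == os.path.sep.join(
...     '0x3e/0xf1/0x3c/0x30/0xe9/0x51/0x99/0xf0'.split('/'))
True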
class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
......@@ -200,7 +201,6 @@ class BlobUndoTests(BlobTestBase):
file.write(b'this is state 2')
transaction.commit()
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
with blob.open('r') as file:
......@@ -308,7 +308,7 @@ class RecoveryBlobStorage(BlobTestBase,
def testSimpleBlobRecovery(self):
self.assertTrue(
ZODB.interfaces.IBlobStorageRestoreable.providedBy(self._storage)
)
)
db = DB(self._storage)
conn = db.open()
conn.root()[1] = ZODB.blob.Blob()
......@@ -320,10 +320,10 @@ class RecoveryBlobStorage(BlobTestBase,
conn.root()[3] = ZODB.blob.Blob()
with conn.root()[3].open('w') as file:
file.write(
(b''.join(struct.pack(">I", random.randint(0, (1<<32)-1))
for i in range(random.randint(10000,20000)))
)[:-random.randint(1,4)]
)
(b''.join(struct.pack(">I", random.randint(0, (1 << 32)-1))
for i in range(random.randint(10000, 20000)))
)[:-random.randint(1, 4)]
)
transaction.commit()
conn.root()[2] = ZODB.blob.Blob()
with conn.root()[2].open('w') as file:
......@@ -359,6 +359,7 @@ def gc_blob_removes_uncommitted_data():
False
"""
def commit_from_wrong_partition():
"""
It should be possible to commit changes even when a blob is on a
......@@ -379,7 +380,7 @@ def commit_from_wrong_partition():
>>> logger.setLevel(logging.DEBUG)
>>> logger.addHandler(handler)
>>> blob_storage = create_storage()
>>> blob_storage = create_storage() # noqa: F821 undefined name
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -425,11 +426,10 @@ def packing_with_uncommitted_data_non_undoing():
blob_directory and confused our packing strategy. We now use a separate
temporary directory that is ignored while packing.
>>> import transaction
>>> from ZODB.DB import DB
>>> from ZODB.serialize import referencesf
>>> blob_storage = create_storage()
>>> blob_storage = create_storage() # noqa: F821 undefined name
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -446,6 +446,7 @@ def packing_with_uncommitted_data_non_undoing():
>>> database.close()
"""
def packing_with_uncommitted_data_undoing():
"""
This covers regression for bug #130459.
......@@ -456,7 +457,7 @@ def packing_with_uncommitted_data_undoing():
>>> from ZODB.serialize import referencesf
>>> blob_storage = create_storage()
>>> blob_storage = create_storage() # noqa: F821 undefined name
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -473,9 +474,10 @@ def packing_with_uncommitted_data_undoing():
>>> database.close()
"""
def test_blob_file_permissions():
"""
>>> blob_storage = create_storage()
>>> blob_storage = create_storage() # noqa: F821 undefined name
>>> conn = ZODB.connection(blob_storage)
>>> conn.root.x = ZODB.blob.Blob(b'test')
>>> conn.transaction_manager.commit()
......@@ -498,6 +500,7 @@ def test_blob_file_permissions():
>>> conn.close()
"""
def loadblob_tmpstore():
"""
This is a test for assuring that the TmpStore's loadBlob implementation
......@@ -505,7 +508,7 @@ def loadblob_tmpstore():
First, let's setup a regular database and store a blob:
>>> blob_storage = create_storage()
>>> blob_storage = create_storage() # noqa: F821 undefined name
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
......@@ -537,11 +540,12 @@ def loadblob_tmpstore():
>>> database.close()
"""
def is_blob_record():
r"""
>>> from ZODB.utils import load_current
>>> bs = create_storage()
>>> bs = create_storage() # noqa: F821 undefined name
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
......@@ -568,9 +572,10 @@ def is_blob_record():
>>> db.close()
"""
def do_not_depend_on_cwd():
"""
>>> bs = create_storage()
>>> bs = create_storage() # noqa: F821 undefined name
>>> here = os.getcwd()
>>> os.mkdir('evil')
>>> os.chdir('evil')
......@@ -587,10 +592,11 @@ def do_not_depend_on_cwd():
>>> db.close()
"""
def savepoint_isolation():
"""Make sure savepoint data is distinct accross transactions
>>> bs = create_storage()
>>> bs = create_storage() # noqa: F821 undefined name
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root.b = ZODB.blob.Blob(b'initial')
......@@ -615,11 +621,12 @@ def savepoint_isolation():
>>> db.close()
"""
def savepoint_commits_without_invalidations_out_of_order():
"""Make sure transactions with blobs can be commited without the
invalidations out of order error (LP #509801)
>>> bs = create_storage()
>>> bs = create_storage() # noqa: F821 undefined name
>>> db = DB(bs)
>>> tm1 = transaction.TransactionManager()
>>> conn1 = db.open(transaction_manager=tm1)
......@@ -647,10 +654,11 @@ def savepoint_commits_without_invalidations_out_of_order():
>>> db.close()
"""
def savepoint_cleanup():
"""Make sure savepoint data gets cleaned up.
>>> bs = create_storage()
>>> bs = create_storage() # noqa: F821 undefined name
>>> tdir = bs.temporaryDirectory()
>>> os.listdir(tdir)
[]
......@@ -676,6 +684,7 @@ def savepoint_cleanup():
>>> db.close()
"""
def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
r"""
>>> db = ZODB.DB('data.fs', blob_dir='blobs')
......@@ -694,19 +703,24 @@ def lp440234_Setting__p_changed_of_a_Blob_w_no_uncomitted_changes_is_noop():
>>> db.close()
"""
def setUp(test):
ZODB.tests.util.setUp(test)
test.globs['rmtree'] = zope.testing.setupstack.rmtree
def timeIncreasesSetUp(test):
setUp(test)
l = test.globs['time_layer'] = ZODB.tests.util.MonotonicallyIncreasingTimeMinimalTestLayer('')
l.testSetUp()
layer = test.globs['time_layer'] = (
ZODB.tests.util.MonotonicallyIncreasingTimeMinimalTestLayer(''))
layer.testSetUp()
def timeIncreasesTearDown(test):
test.globs['time_layer'].testTearDown()
util.tearDown(test)
def setUpBlobAdaptedFileStorage(test):
setUp(test)
......@@ -717,6 +731,7 @@ def setUpBlobAdaptedFileStorage(test):
test.globs['create_storage'] = create_storage
def storage_reusable_suite(prefix, factory,
test_blob_storage_recovery=False,
test_packing=False,
......@@ -729,6 +744,7 @@ def storage_reusable_suite(prefix, factory,
def setup(test):
setUp(test)
def create_storage(name='data', blob_dir=None):
if blob_dir is None:
blob_dir = '%s.bobs' % name
......@@ -749,33 +765,35 @@ def storage_reusable_suite(prefix, factory,
(re.compile(r'^b"'), '"'),
# ...and native strings where Python2 used unicode.
(re.compile("^POSKeyError: u'No blob file"),
"POSKeyError: 'No blob file"),
"POSKeyError: 'No blob file"),
# Py3k repr's exceptions with dotted names
(re.compile("^ZODB.interfaces.BlobError:"), "BlobError:"),
(re.compile("^ZODB.POSException.ConflictError:"), "ConflictError:"),
(re.compile("^ZODB.POSException.ConflictError:"),
"ConflictError:"),
(re.compile("^ZODB.POSException.POSKeyError:"), "POSKeyError:"),
(re.compile("^ZODB.POSException.Unsupported:"), "Unsupported:"),
# Normalize out blobfile paths for sake of Windows
(re.compile(
r'([a-zA-Z]:)?\%(sep)s.*\%(sep)s(server-)?blobs\%(sep)s.*\.blob'
% dict(sep=os.path.sep)), '<BLOB STORAGE PATH>')
]),
r'([a-zA-Z]:)?\%(sep)s.*\%(sep)s(server-)'
r'?blobs\%(sep)s.*\.blob' % dict(sep=os.path.sep)),
'<BLOB STORAGE PATH>')
]),
optionflags=doctest.ELLIPSIS,
))
))
if test_packing:
suite.addTest(doctest.DocFileSuite(
"blob_packing.txt",
setUp=setup, tearDown=util.tearDown,
))
))
suite.addTest(doctest.DocTestSuite(
setUp=setup, tearDown=util.tearDown,
checker = (
checker=(
ZODB.tests.util.checker +
zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
])),
))
])),
))
def create_storage(self, name='data', blob_dir=None):
if blob_dir is None:
......@@ -786,7 +804,7 @@ def storage_reusable_suite(prefix, factory,
new_class = class_.__class__(
prefix+class_.__name__, (class_, ),
dict(create_storage=create_storage),
)
)
suite.addTest(unittest.makeSuite(new_class))
if test_blob_storage_recovery:
......@@ -794,10 +812,12 @@ def storage_reusable_suite(prefix, factory,
if test_undo:
add_test_based_on_test_class(BlobUndoTests)
suite.layer = ZODB.tests.util.MonotonicallyIncreasingTimeMinimalTestLayer(prefix+'BlobTests')
suite.layer = ZODB.tests.util.MonotonicallyIncreasingTimeMinimalTestLayer(
prefix+'BlobTests')
return suite
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
......@@ -811,36 +831,33 @@ def test_suite():
tearDown=util.tearDown,
optionflags=doctest.ELLIPSIS,
checker=ZODB.tests.util.checker,
))
))
suite.addTest(doctest.DocFileSuite(
"blobstorage_packing.txt",
setUp=timeIncreasesSetUp,
tearDown=timeIncreasesTearDown,
optionflags=doctest.ELLIPSIS,
checker=ZODB.tests.util.checker,
))
))
suite.addTest(doctest.DocFileSuite(
"blob_layout.txt",
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
setUp=setUp,
tearDown=util.tearDown,
checker=ZODB.tests.util.checker +
zope.testing.renormalizing.RENormalizing([
zope.testing.renormalizing.RENormalizing([
(re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
(re.compile(r'\S+/((old|bushy|lawn)/\S+/foo[23456]?)'), r'\1'),
(re.compile(r"u('[^']*')"), r"\1"),
]),
))
]),
))
suite.addTest(storage_reusable_suite(
'BlobAdaptedFileStorage',
lambda name, blob_dir:
ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name)),
test_blob_storage_recovery=True,
test_packing=True,
))
))
return suite
if __name__ == '__main__':
unittest.main(defaultTest = 'test_suite')
......@@ -24,12 +24,14 @@ import ZODB.tests.util
import ZODB.POSException
import zope.testing.module
def setUp(test):
ZODB.tests.util.setUp(test)
zope.testing.module.setUp(test, 'ConflictResolution_txt')
ZODB.ConflictResolution._class_cache.clear()
ZODB.ConflictResolution._unresolvable.clear()
def tearDown(test):
zope.testing.module.tearDown(test)
ZODB.tests.util.tearDown(test)
......@@ -51,9 +53,11 @@ class ResolveableWhenStateDoesNotChange(persistent.Persistent):
# 3-way merge
raise ZODB.POSException.ConflictError
class Unresolvable(persistent.Persistent):
pass
def succeed_with_resolution_when_state_is_unchanged():
"""
If a conflicting change doesn't change the state, then we must still call
......@@ -130,6 +134,7 @@ mechanism.
>>> db.close()
"""
class Resolveable(persistent.Persistent):
def _p_resolveConflict(self, old, committed, new):
......@@ -171,6 +176,7 @@ class Resolveable(persistent.Persistent):
return resolved
def resolve_even_when_referenced_classes_are_absent():
"""
......@@ -385,7 +391,7 @@ Cleanup:
>>> handler.uninstall()
>>> db.close()
"""
""" # noqa: E501 line too long
def test_suite():
......@@ -396,8 +402,8 @@ def test_suite():
+ manuel.capture.Manuel(),
'../ConflictResolution.rst',
setUp=setUp, tearDown=tearDown
),
),
doctest.DocTestSuite(
setUp=setUp, tearDown=tearDown,
checker=ZODB.tests.util.checker),
])
])
......@@ -16,14 +16,17 @@ import persistent
import unittest
import ZODB.tests.util
class MyClass(persistent.Persistent):
pass
class MyClass_w_getnewargs(persistent.Persistent):
def __getnewargs__(self):
return ()
def test_must_use_consistent_connections():
"""
......@@ -34,7 +37,7 @@ work.
For example, it's tempting to open a second database using the
database open function, but this doesn't work:
>>> import ZODB.tests.util, transaction, persistent
>>> import ZODB.tests.util, transaction
>>> databases = {}
>>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
>>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
......@@ -82,6 +85,7 @@ different connections to the same database.
"""
def test_connection_management_doesnt_get_caching_wrong():
"""
......@@ -89,7 +93,7 @@ If a connection participates in a multidatabase, then it's
connections must remain so that references between its cached
objects remain sane.
>>> import ZODB.tests.util, transaction, persistent
>>> import ZODB.tests.util, transaction
>>> databases = {}
>>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
>>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
......@@ -125,10 +129,11 @@ if we get the same objects:
>>> db2.close()
"""
def test_explicit_adding_with_savepoint():
"""
>>> import ZODB.tests.util, transaction, persistent
>>> import ZODB.tests.util, transaction
>>> databases = {}
>>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
>>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
......@@ -150,10 +155,11 @@ def test_explicit_adding_with_savepoint():
"""
def test_explicit_adding_with_savepoint2():
"""
>>> import ZODB.tests.util, transaction, persistent
>>> import ZODB.tests.util, transaction
>>> databases = {}
>>> db1 = ZODB.tests.util.DB(databases=databases, database_name='1')
>>> db2 = ZODB.tests.util.DB(databases=databases, database_name='2')
......@@ -176,27 +182,25 @@ def test_explicit_adding_with_savepoint2():
"""
def tearDownDbs(test):
test.globs['db1'].close()
test.globs['db2'].close()
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite(
'../cross-database-references.rst',
globs=dict(MyClass=MyClass),
tearDown=tearDownDbs,
checker=ZODB.tests.util.checker,
),
'../cross-database-references.rst',
globs=dict(MyClass=MyClass),
tearDown=tearDownDbs,
checker=ZODB.tests.util.checker,
),
doctest.DocFileSuite(
'../cross-database-references.rst',
globs=dict(MyClass=MyClass_w_getnewargs),
tearDown=tearDownDbs,
checker=ZODB.tests.util.checker,
),
'../cross-database-references.rst',
globs=dict(MyClass=MyClass_w_getnewargs),
tearDown=tearDownDbs,
checker=ZODB.tests.util.checker,
),
doctest.DocTestSuite(checker=ZODB.tests.util.checker),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
))
......@@ -22,19 +22,22 @@ import zope.testing.module
import ZODB
def setUp(test):
test.globs.update(
ZODB=ZODB,
)
)
zope.testing.module.setUp(test)
def tearDown(test):
zope.testing.module.tearDown(test)
def test_suite():
base, src = os.path.split(os.path.dirname(os.path.dirname(ZODB.__file__)))
assert src == 'src', src
base = join(base, 'doc')
base = join(base, 'docs')
guide = join(base, 'guide')
reference = join(base, 'reference')
......@@ -42,15 +45,12 @@ def test_suite():
manuel.testing.TestSuite(
manuel.doctest.Manuel(
optionflags=doctest.IGNORE_EXCEPTION_DETAIL,
) + manuel.capture.Manuel(),
) + manuel.capture.Manuel(),
join(guide, 'writing-persistent-objects.rst'),
join(guide, 'install-and-run.rst'),
join(guide, 'transactions-and-threading.rst'),
join(reference, 'zodb.rst'),
join(reference, 'storages.rst'),
setUp=setUp, tearDown=tearDown,
),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
),
))
......@@ -26,6 +26,7 @@ except NameError:
# Py3: No xrange.
xrange = range
class Test(unittest.TestCase):
def setUp(self):
......@@ -55,16 +56,16 @@ class Test(unittest.TestCase):
def testInserts(self):
index = self.index
for i in range(0,200):
self.assertEqual((i,index[p64(i*1000)]), (i,(i*1000+1)))
for i in range(0, 200):
self.assertEqual((i, index[p64(i*1000)]), (i, (i*1000+1)))
self.assertEqual(len(index), 200)
key=p64(2000)
key = p64(2000)
self.assertEqual(index.get(key), 2001)
key=p64(2001)
key = p64(2001)
self.assertEqual(index.get(key), None)
self.assertEqual(index.get(key, ''), '')
......@@ -72,20 +73,20 @@ class Test(unittest.TestCase):
def testUpdate(self):
index = self.index
d={}
d = {}
for i in range(200):
d[p64(i*1000)]=(i*1000+1)
d[p64(i*1000)] = (i*1000+1)
index.update(d)
for i in range(400,600):
d[p64(i*1000)]=(i*1000+1)
for i in range(400, 600):
d[p64(i*1000)] = (i*1000+1)
index.update(d)
for i in range(100, 500):
d[p64(i*1000)]=(i*1000+2)
d[p64(i*1000)] = (i*1000+2)
index.update(d)
......@@ -194,6 +195,7 @@ class Test(unittest.TestCase):
self.assertEqual(index.minKey(b), c)
self.assertRaises(ValueError, index.minKey, d)
def fsIndex_save_and_load():
"""
fsIndex objects now have save methods for saving them to disk in a new
......@@ -232,6 +234,7 @@ If we save the data in the old format, we can still read it:
"""
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test))
......
......@@ -16,10 +16,11 @@ import manuel.footnote
import manuel.testing
import ZODB.tests.util
def test_suite():
return manuel.testing.TestSuite(
manuel.doctest.Manuel(checker=ZODB.tests.util.checker) +
manuel.footnote.Manuel(),
'../historical_connections.rst',
setUp=ZODB.tests.util.setUp, tearDown=ZODB.tests.util.tearDown,
)
)
......@@ -428,7 +428,8 @@ checker = renormalizing.RENormalizing([
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.ConflictError"), r"ConflictError"),
(re.compile("ZODB.POSException.ReadConflictError"), r"ReadConflictError"),
])
])
def test_suite():
return doctest.DocTestSuite(checker=checker)
......@@ -18,6 +18,7 @@ import unittest
import ZODB.persistentclass
import ZODB.tests.util
def class_with_circular_ref_to_self():
"""
It should be possible for a class to refer to itself.
......@@ -38,6 +39,7 @@ It should be possible for a class to reger to itself.
"""
def test_new_ghost_w_persistent_class():
"""
Persistent meta classes work with PickleCache.new_ghost:
......@@ -67,6 +69,8 @@ def test_new_ghost_w_persistent_class():
"""
# XXX need to update files to get newer testing package
class FakeModule(object):
def __init__(self, name, dict):
self.__dict__ = dict
......@@ -79,19 +83,18 @@ def setUp(test):
module = FakeModule('ZODB.persistentclass_txt', test.globs)
sys.modules[module.__name__] = module
def tearDown(test):
test.globs['some_database'].close()
del sys.modules['ZODB.persistentclass_txt']
ZODB.tests.util.tearDown(test)
def test_suite():
return unittest.TestSuite((
doctest.DocFileSuite(
"../persistentclass.rst",
setUp=setUp, tearDown=tearDown,
checker=ZODB.tests.util.checker),
"../persistentclass.rst",
setUp=setUp, tearDown=tearDown,
checker=ZODB.tests.util.checker),
doctest.DocTestSuite(setUp=setUp, tearDown=tearDown),
))
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
))
......@@ -13,7 +13,8 @@
##############################################################################
"""Conventience function for creating test databases
"""
from ZODB.MappingStorage import DB
# BBB
from ZODB.MappingStorage import DB # noqa: F401 import unused
import atexit
import doctest
......@@ -45,7 +46,6 @@ from time import gmtime as _real_gmtime
_current_time = _real_time()
checker = renormalizing.RENormalizing([
(re.compile("<(.*?) object at 0x[0-9a-f]*?>"),
r"<\1 object at 0x000000000000>"),
......@@ -80,7 +80,8 @@ checker = renormalizing.RENormalizing([
r"Unsupported"),
(re.compile("ZConfig.ConfigurationSyntaxError"),
r"ConfigurationSyntaxError"),
])
])
def setUp(test, name='test'):
clear_transaction_syncs()
......@@ -94,10 +95,12 @@ def setUp(test, name='test'):
os.chdir(d)
zope.testing.setupstack.register(test, transaction.abort)
def tearDown(test):
clear_transaction_syncs()
zope.testing.setupstack.tearDown(test)
class TestCase(unittest.TestCase):
def setUp(self):
......@@ -110,9 +113,11 @@ class TestCase(unittest.TestCase):
tearDown = tearDown
def pack(db):
db.pack(time.time()+1)
class P(persistent.Persistent):
def __init__(self, name=None):
......@@ -121,10 +126,12 @@ class P(persistent.Persistent):
def __repr__(self):
return 'P(%s)' % self.name
class MininalTestLayer(object):
__bases__ = ()
__module__ = ''
def __init__(self, name):
self.__name__ = name
......@@ -142,10 +149,12 @@ class MininalTestLayer(object):
testSetUp = testTearDown = lambda self: None
def clean(tmp):
if os.path.isdir(tmp):
zope.testing.setupstack.rmtree(tmp)
class AAAA_Test_Runner_Hack(unittest.TestCase):
"""Hack to work around a bug in the test runner.
......@@ -157,19 +166,22 @@ class AAAA_Test_Runner_Hack(unittest.TestCase):
def testNothing(self):
pass
def assert_warning(category, func, warning_text=''):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('default')
result = func()
for warning in w:
if ((warning.category is category)
and (warning_text in str(warning.message))):
and (warning_text in str(warning.message))):
return result
raise AssertionError(w)
def assert_deprecated(func, warning_text=''):
return assert_warning(DeprecationWarning, func, warning_text)
def wait(func=None, timeout=30):
if func is None:
return lambda f: wait(f, timeout)
......@@ -179,6 +191,7 @@ def wait(func=None, timeout=30):
time.sleep(.01)
raise AssertionError
def store(storage, oid, value='x', serial=ZODB.utils.z64):
if not isinstance(oid, bytes):
oid = ZODB.utils.p64(oid)
......@@ -190,8 +203,10 @@ def store(storage, oid, value='x', serial=ZODB.utils.z64):
storage.tpc_vote(t)
storage.tpc_finish(t)
def mess_with_time(test=None, globs=None, now=1278864701.5):
now = [now]
def faux_time():
now[0] += 1
return now[0]
......@@ -204,11 +219,12 @@ def mess_with_time(test=None, globs=None, now=1278864701.5):
import time
zope.testing.setupstack.register(test, setattr, time, 'time', time.time)
if isinstance(time,type):
time.time = staticmethod(faux_time) # jython
if isinstance(time, type):
time.time = staticmethod(faux_time) # jython
else:
time.time = faux_time
def clear_transaction_syncs():
"""Clear data managers registered with the global transaction manager
......@@ -236,9 +252,10 @@ class _TimeWrapper(object):
def _configure_fakes(self):
def incr():
global _current_time # pylint:disable=global-statement
global _current_time # pylint:disable=global-statement
with self._lock:
_current_time = max(_real_time(), _current_time + self._granularity)
_current_time = max(
_real_time(), _current_time + self._granularity)
return _current_time
self.fake_time.side_effect = incr
......@@ -329,7 +346,7 @@ def reset_monotonic_time(value=0.0):
call.
"""
global _current_time # pylint:disable=global-statement
global _current_time # pylint:disable=global-statement
_current_time = value
......@@ -346,7 +363,8 @@ class MonotonicallyIncreasingTimeMinimalTestLayer(MininalTestLayer):
def with_high_concurrency(f):
"""
with_high_concurrency decorates f to run with high frequency of thread context switches.
with_high_concurrency decorates f to run with high frequency of thread
context switches.
It is useful for tests that try to probabilistically reproduce race
condition scenarios.
......@@ -357,19 +375,23 @@ def with_high_concurrency(f):
# Python3, by default, switches every 5ms, which turns threads in
# intended "high concurrency" scenarios to execute almost serially.
# Raise the frequency of context switches in order to increase the
# probability to reproduce interesting/tricky overlapping of threads.
# probability to reproduce interesting/tricky overlapping of
# threads.
#
# See https://github.com/zopefoundation/ZODB/pull/345#issuecomment-822188305 and
# https://github.com/zopefoundation/ZEO/issues/168#issuecomment-821829116 for details.
# See https://github.com/zopefoundation/ZODB/pull/345#issuecomment-822188305 and # noqa: E501 line too long
# https://github.com/zopefoundation/ZEO/issues/168#issuecomment-821829116 for details. # noqa: E501 line too long
_ = sys.getswitchinterval()
def restore():
sys.setswitchinterval(_)
sys.setswitchinterval(5e-6) # ~ 100 simple instructions on modern hardware
# ~ 100 simple instructions on modern hardware
sys.setswitchinterval(5e-6)
else:
# Python2, by default, switches threads every "100 instructions".
# Just make sure we run f with that default.
_ = sys.getcheckinterval()
def restore():
sys.setcheckinterval(_)
sys.setcheckinterval(100)
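Illustrative aside (hypothetical helper, not the ZODB implementation above):
the same idea can be sketched as a standalone decorator that shrinks the
interpreter's thread switch interval for the duration of a test and restores
it afterwards:

import functools
import sys

def high_concurrency_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        previous = sys.getswitchinterval()
        # Switch threads every few interpreted instructions so that racy
        # interleavings show up far more often than with the 5ms default.
        sys.setswitchinterval(5e-6)
        try:
            return func(*args, **kwargs)
        finally:
            sys.setswitchinterval(previous)
    return wrapper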
......@@ -404,4 +426,4 @@ def run_module_as_script(mod, args, stdout="stdout", stderr="stderr"):
sys.stdout.close()
sys.stderr.close()
pdb.set_trace = s_set_trace
sys.argv, sys.stdout, sys.stderr = sargv, sout, serr
sys.argv, sys.stdout, sys.stderr = sargv, sout, serr
......@@ -13,6 +13,7 @@
##############################################################################
import warnings
class WarningsHook(object):
"""Hook to capture warnings generated by Python.
......
......@@ -16,12 +16,14 @@
from ZODB.POSException import ReadConflictError, ConflictError
import transaction
def _commit(note):
t = transaction.get()
if note:
t.note(note)
t.commit()
def transact(f, note=None, retries=5):
"""Returns transactional version of function argument f.
......@@ -42,7 +44,7 @@ def transact(f, note=None, retries=5):
n -= 1
try:
r = f(*args, **kwargs)
except ReadConflictError as msg:
except ReadConflictError:
# the only way ReadConflictError can happen here is due to
# simultaneous pack removing objects revision that f could try
# to load.
......@@ -52,7 +54,7 @@ def transact(f, note=None, retries=5):
continue
try:
_commit(note)
except ConflictError as msg:
except ConflictError:
transaction.abort()
if not n:
raise
......
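Usage sketch for transact (the names below are illustrative, not from the
diff): wrap a function that mutates persistent state, and the wrapper commits
on success, retrying on conflict errors up to the given number of times:

from ZODB.transact import transact

def add_item(root, key, value):
    # Runs inside the managed transaction; transact() commits afterwards.
    root[key] = value

transactional_add = transact(add_item, note='add one item', retries=3)
# transactional_add(conn.root(), 'answer', 42) would commit the change,
# retrying on ReadConflictError/ConflictError up to three times.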
......@@ -43,7 +43,7 @@ __all__ = ['z64',
'readable_tid_repr',
'get_pickle_metadata',
'locked',
]
]
if PY2:
......@@ -70,7 +70,7 @@ else:
return bytes.decode("ascii")
def byte_ord(byte):
return byte # elements of bytes are already ints
return byte # elements of bytes are already ints
def byte_chr(int):
return bytes((int,))
......@@ -96,6 +96,7 @@ def p64(v):
except struct.error as e:
raise ValueError(*(e.args + (v,)))
def u64(v):
"""Unpack an 8-byte string into a 64-bit long integer."""
try:
......@@ -103,6 +104,7 @@ def u64(v):
except struct.error as e:
raise ValueError(*(e.args + (v,)))
U64 = u64
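Round-trip sketch for p64/u64 (values chosen for illustration): oids and tids
are 8-byte big-endian packings of 64-bit integers, so the two functions invert
each other:

from ZODB.utils import p64, u64

oid = p64(2001)          # struct.pack('>Q', 2001): 8 bytes, big-endian
assert oid == b'\x00\x00\x00\x00\x00\x00\x07\xd1'
assert u64(oid) == 2001  # unpacking recovers the original integer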
......@@ -121,7 +123,7 @@ def cp(f1, f2, length=None, bufsize=64 * 1024):
if length is None:
old_pos = f1.tell()
f1.seek(0,2)
f1.seek(0, 2)
length = f1.tell()
f1.seek(old_pos)
......@@ -134,9 +136,10 @@ def cp(f1, f2, length=None, bufsize=64 * 1024):
write(data)
length -= len(data)
def newTid(old):
t = time.time()
ts = TimeStamp(*time.gmtime(t)[:5]+(t%60,))
ts = TimeStamp(*time.gmtime(t)[:5]+(t % 60,))
if old is not None:
ts = ts.laterThan(TimeStamp(old))
return ts.raw()
......@@ -155,6 +158,7 @@ def oid_repr(oid):
else:
return repr(oid)
def repr_to_oid(repr):
repr = ascii_bytes(repr)
if repr.startswith(b"0x"):
......@@ -163,12 +167,15 @@ def repr_to_oid(repr):
as_bin = b"\x00"*(8-len(as_bin)) + as_bin
return as_bin
serial_repr = oid_repr
tid_repr = serial_repr
# For example, produce
# '0x03441422948b4399 2002-04-14 20:50:34.815000'
# for 8-byte string tid b'\x03D\x14"\x94\x8bC\x99'.
def readable_tid_repr(tid):
result = tid_repr(tid)
if isinstance(tid, bytes) and len(tid) == 8:
......@@ -184,7 +191,10 @@ def readable_tid_repr(tid):
# a negative address gives a positive int with the same hex representation as
# the significant bits in the original.
_ADDRESS_MASK = 256 ** struct.calcsize('P')
def positive_id(obj):
"""Return id(obj) as a non-negative integer."""
......@@ -201,6 +211,7 @@ def positive_id(obj):
# docs to be at least as smart. The code here doesn't appear to make sense
# for what serialize.py calls formats 5 and 6.
def get_pickle_metadata(data):
# Returns a 2-tuple of strings.
......@@ -209,7 +220,7 @@ def get_pickle_metadata(data):
# pick apart the first here, to extract the module and class names.
if data[0] in (0x80, # Py3k indexes bytes -> int
b'\x80' # Python2 indexes bytes -> bytes
): # protocol marker, protocol > 1
): # protocol marker, protocol > 1
data = data[2:]
if data.startswith(b'(c'): # pickle MARK GLOBAL opcode sequence
global_prefix = 2
......@@ -233,7 +244,7 @@ def get_pickle_metadata(data):
u = Unpickler(f)
try:
class_info = u.load()
except Exception as err:
except Exception:
return '', ''
if isinstance(class_info, tuple):
if isinstance(class_info[0], tuple):
......@@ -248,18 +259,21 @@ def get_pickle_metadata(data):
classname = ''
return modname, classname
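Usage sketch for get_pickle_metadata (hypothetical class name; the reported
module depends on where the snippet runs): the first pickle of a data record
names the class, and the helper reads that name without importing the class
or unpickling application state:

import pickle
from ZODB.utils import get_pickle_metadata

class Sample(object):
    pass

record = pickle.dumps(Sample, protocol=2)   # starts with the GLOBAL opcode
print(get_pickle_metadata(record))          # e.g. ('__main__', 'Sample')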
def mktemp(dir=None, prefix='tmp'):
"""Create a temp file, known by name, in a semi-secure manner."""
handle, filename = mkstemp(dir=dir, prefix=prefix)
os.close(handle)
return filename
def check_precondition(precondition):
if not precondition():
raise AssertionError(
"Failed precondition: ",
precondition.__doc__.strip())
class Locked(object):
def __init__(self, func, inst=None, class_=None, preconditions=()):
......@@ -287,6 +301,7 @@ class Locked(object):
return func(*args, **kw)
class locked(object):
def __init__(self, *preconditions):
......@@ -302,7 +317,7 @@ class locked(object):
return Locked(func, preconditions=self.preconditions)
if os.environ.get('DEBUG_LOCKING'): # pragma: no cover
if os.environ.get('DEBUG_LOCKING'): # pragma: no cover
# NOTE: This only works on Python 3.
class Lock(object):
......@@ -363,10 +378,11 @@ if os.environ.get('DEBUG_LOCKING'): # pragma: no cover
else:
from threading import Condition, Lock, RLock
from threading import Condition, Lock, RLock # noqa: F401 import unused
import ZODB.POSException # noqa: E402 module level import not at top of file
import ZODB.POSException
def load_current(storage, oid, version=''):
"""Load the most recent revision of an object by calling loadBefore
......
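Usage sketch for load_current (storage and oid chosen for illustration): it
wraps loadBefore and returns the current data record together with the tid
that wrote it:

import transaction
import ZODB
import ZODB.MappingStorage
from ZODB.utils import load_current, z64

db = ZODB.DB(ZODB.MappingStorage.MappingStorage())
conn = db.open()
conn.root()['x'] = 1
transaction.commit()

# z64 (eight zero bytes) is the root object's oid.
data, serial = load_current(db.storage, z64)
assert isinstance(data, bytes) and len(serial) == 8
db.close()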
"""Work around an issue with defining class attribute documentation.
See http://stackoverflow.com/questions/9153473/sphinx-values-for-attributes-reported-as-none/39276413
"""
""" # noqa: E501 line too long
class ValueDoc(object):
......@@ -10,4 +11,3 @@ class ValueDoc(object):
def __repr__(self):
return self.text
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
[tox]
envlist = py27,py35,py36,py37,py38,pypy,pypy3,py38-pure,docs
minversion = 3.18
envlist =
lint
py27
py35
py36
py37
py38
py39
py310
pypy
pypy3
docs
coverage
py38-pure
[testenv]
# ZODB.tests.testdocumentation needs to find
# itself in the source tree to locate the doc/
# directory. 'usedevelop' is more like what
# buildout.cfg does, and is simpler than having
# testdocumentation.py also understand how to climb
# out of the tox site-packages.
usedevelop = true
commands =
zope-testrunner --test-path=src []
deps =
.[test]
setenv =
ZOPE_INTERFACE_STRICT_IRO = 1
[testenv:coverage]
basepython = python3.7
ZOPE_INTERFACE_STRICT_IRO=1
commands =
coverage run {envdir}/bin/zope-testrunner --all --test-path=src []
coverage combine
coverage report
deps =
{[testenv]deps}
coverage
zope-testrunner --test-path=src {posargs:-vc}
extras =
test
[testenv:py38-pure]
basepython =
python3.8
basepython = python3.8
setenv =
PURE_PYTHON = 1
[testenv:lint]
basepython = python3
skip_install = true
deps =
flake8
check-manifest
check-python-versions >= 0.19.1
wheel
commands =
flake8 src setup.py
check-manifest
check-python-versions
[testenv:docs]
basepython =
python3.7
basepython = python3
skip_install = false
extras =
docs
commands_pre =
commands =
sphinx-build -b html -d doc/_build/doctrees doc doc/_build/html
sphinx-build -d doc/_build/doctrees doc doc/_build/doctest
sphinx-build -b html -d docs/_build/doctrees docs docs/_build/html
[testenv:coverage]
basepython = python3
allowlist_externals =
mkdir
deps =
--requirement doc/requirements.txt
coverage
coverage-python-version
commands =
mkdir -p {toxinidir}/parts/htmlcov
coverage run -m zope.testrunner --test-path=src {posargs:-vc}
coverage html
coverage report -m --fail-under=80
[coverage:run]
branch = True
plugins = coverage_python_version
source = ZODB
[coverage:report]
precision = 2
exclude_lines =
pragma: no cover
pragma: nocover
except ImportError:
raise NotImplementedError
if __name__ == '__main__':
self.fail
raise AssertionError
[coverage:html]
directory = parts/htmlcov