Commit 6f887812 authored by Michael Howitz, committed by GitHub

Drop support for Python < 3.7 (#386)

* Bumped version for breaking release.

* Drop support for Python 2.7, 3.5, 3.6.

---------
Co-authored-by: Jens Vagelpohl <jens@plyp.com>
parent 198bfdf4
......@@ -22,24 +22,19 @@ jobs:
config:
# [Python version, tox env]
- ["3.9", "lint"]
- ["2.7", "py27"]
- ["3.5", "py35"]
- ["3.6", "py36"]
- ["3.7", "py37"]
- ["3.8", "py38"]
- ["3.9", "py39"]
- ["3.10", "py310"]
- ["3.11", "py311"]
- ["pypy-2.7", "pypy"]
- ["pypy-3.7", "pypy3"]
- ["pypy-3.9", "pypy3"]
- ["3.9", "docs"]
- ["3.9", "coverage"]
- ["3.8", "py38-pure"]
- ["3.10", "py310-pure"]
exclude:
- { os: ["windows", "windows-latest"], config: ["3.9", "lint"] }
- { os: ["windows", "windows-latest"], config: ["3.9", "docs"] }
- { os: ["windows", "windows-latest"], config: ["3.9", "coverage"] }
- { os: ["windows", "windows-latest"], config: ["pypy-2.7", "pypy"] }
runs-on: ${{ matrix.os[1] }}
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
......@@ -67,7 +62,7 @@ jobs:
- name: Coverage
if: matrix.config[1] == 'coverage'
run: |
pip install coveralls coverage-python-version
pip install coveralls
coveralls --service=github
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
......@@ -2,15 +2,15 @@
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
[meta]
template = "pure-python"
commit-id = "b4dd6f9ffd3d6a2cde7dc70512c62d4c7ed22cd6"
commit-id = "b99ba750"
[python]
with-windows = true
with-pypy = true
with-future-python = false
with-legacy-python = true
with-docs = true
with-sphinx-doctests = false
with-macos = false
[tox]
use-flake8 = true
......@@ -21,12 +21,12 @@ testenv-setenv = [
"ZOPE_INTERFACE_STRICT_IRO=1",
]
additional-envlist = [
"py38-pure",
"py310-pure",
]
testenv-additional = [
"",
"[testenv:py38-pure]",
"basepython = python3.8",
"[testenv:py310-pure]",
"basepython = python3.10",
"setenv =",
" PURE_PYTHON = 1",
]
......@@ -67,8 +67,5 @@ additional-ignores = [
[github-actions]
additional-config = [
"- [\"3.8\", \"py38-pure\"]",
]
additional-exclude = [
"- { os: windows, config: [\"pypy-2.7\", \"pypy\"] }",
"- [\"3.10\", \"py310-pure\"]",
]
......@@ -2,8 +2,10 @@
Change History
================
5.8.2 (unreleased)
==================
6.0 (unreleased)
================
- Drop support for Python 2.7, 3.5, 3.6.
5.8.1 (2023-07-18)
......
# Generated from:
# https://github.com/zopefoundation/meta/tree/master/config/pure-python
[bdist_wheel]
universal = 1
universal = 0
[flake8]
doctests = 1
......@@ -26,7 +26,7 @@ ignore =
force_single_line = True
combine_as_imports = True
sections = FUTURE,STDLIB,THIRDPARTY,ZOPE,FIRSTPARTY,LOCALFOLDER
known_third_party = six, docutils, pkg_resources
known_third_party = docutils, pkg_resources, pytz
known_zope =
known_first_party =
default_section = ZOPE
......
......@@ -15,30 +15,7 @@ from setuptools import find_packages
from setuptools import setup
version = '5.8.2.dev0'
classifiers = """\
Intended Audience :: Developers
License :: OSI Approved :: Zope Public License
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Database
Topic :: Software Development :: Libraries :: Python Modules
Operating System :: Microsoft :: Windows
Operating System :: Unix
Framework :: ZODB
"""
version = '6.0.dev0'
def read(path):
......@@ -50,7 +27,6 @@ long_description = read("README.rst") + "\n\n" + read("CHANGES.rst")
tests_require = [
'manuel',
'mock; python_version == "2.7"',
'zope.testing',
'zope.testrunner >= 4.4.6',
]
......@@ -61,14 +37,31 @@ setup(
author="Jim Fulton",
author_email="jim@zope.com",
maintainer="Zope Foundation and Contributors",
maintainer_email="zodb-dev@zope.org",
maintainer_email="zodb-dev@zope.dev",
keywords="database nosql python zope",
packages=find_packages('src'),
package_dir={'': 'src'},
url='http://zodb-docs.readthedocs.io',
license="ZPL 2.1",
platforms=["any"],
classifiers=list(filter(None, classifiers.split("\n"))),
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Zope Public License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Database",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix",
"Framework :: ZODB",
],
description=long_description.split('\n', 2)[1],
long_description=long_description,
tests_require=tests_require,
......@@ -87,7 +80,6 @@ setup(
'BTrees >= 4.2.0',
'ZConfig',
'transaction >= 2.4',
'six',
'zc.lockfile',
'zope.interface',
'zodbpickle >= 1.0.1',
......@@ -102,5 +94,5 @@ setup(
repozo = ZODB.scripts.repozo:main
""",
include_package_data=True,
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
python_requires='>=3.7',
)
......@@ -19,7 +19,7 @@ import time
from . import utils
class ActivityMonitor(object):
class ActivityMonitor:
"""ZODB load/store activity monitor
This simple implementation just keeps a small log in memory
......
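For illustration, a minimal sketch of how such a monitor is consulted; setActivityMonitor/getActivityAnalysis and the result keys are assumed from the class's public API, which this diff does not show:

import transaction
from ZODB import DB
from ZODB.ActivityMonitor import ActivityMonitor
from ZODB.MappingStorage import MappingStorage

db = DB(MappingStorage())
db.setActivityMonitor(ActivityMonitor())  # hook the monitor into the DB
conn = db.open()
conn.root()['x'] = 1
transaction.commit()
conn.close()   # the connection reports its load/store counts on close

for slot in db.getActivityMonitor().getActivityAnalysis(divisions=3):
    print(slot['start'], slot['end'], slot['loads'], slot['stores'])
db.close()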
......@@ -16,7 +16,6 @@
The base class here is tightly coupled with its subclasses and
its use is not recommended. It's still here for historical reasons.
"""
from __future__ import print_function
import logging
import time
......@@ -30,7 +29,6 @@ import ZODB.interfaces
from . import POSException
from . import utils
from ._compat import py2_hasattr
from .Connection import TransactionMetaData
from .UndoLogCompatible import UndoLogCompatible
from .utils import byte_chr
......@@ -310,7 +308,12 @@ def copy(source, dest, verbose=0):
# using store(). However, if we use store, then
# copyTransactionsFrom() may fail with VersionLockError or
# ConflictError.
restoring = py2_hasattr(dest, 'restore')
try:
getattr(dest, 'restore')
except: # noqa: E722 do not use bare 'except'
restoring = False
else:
restoring = True
fiter = source.iterator()
for transaction in fiter:
tid = transaction.tid
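The try/except block above inlines the semantics of the removed py2_hasattr helper: unlike plain hasattr(), which only swallows AttributeError, it treats any exception raised while looking up restore as "no restore support". A standalone sketch of that equivalence (the helper name is illustrative only):

def lenient_hasattr(obj, name):
    # Same semantics as the removed ZODB._compat.py2_hasattr: any
    # exception during attribute access counts as "not present".
    try:
        getattr(obj, name)
    except:  # noqa: E722 deliberately broad, like the original
        return False
    return True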
......@@ -320,14 +323,14 @@ def copy(source, dest, verbose=0):
t = TimeStamp(tid)
if t <= _ts:
if ok:
print(('Time stamps out of order %s, %s' % (_ts, t)))
print('Time stamps out of order {}, {}'.format(_ts, t))
ok = 0
_ts = t.laterThan(_ts)
tid = _ts.raw()
else:
_ts = t
if not ok:
print(('Time stamps back in order %s' % (t)))
print('Time stamps back in order %s' % (t))
ok = 1
if verbose:
......@@ -376,7 +379,7 @@ class TransactionRecord(TransactionMetaData):
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
class DataRecord:
"""Abstract base class for iterator protocol"""
version = ''
......
......@@ -13,16 +13,11 @@
##############################################################################
import logging
# Subtle: Python 2.x has pickle.PicklingError and cPickle.PicklingError,
# and these are unrelated classes! So we shouldn't use pickle.PicklingError,
# since on Python 2, ZODB._compat.pickle is cPickle.
from io import BytesIO
from pickle import PicklingError
import six
import zope.interface
from ZODB._compat import BytesIO
from ZODB._compat import PersistentPickler
from ZODB._compat import PersistentUnpickler
from ZODB._compat import _protocol
......@@ -37,7 +32,7 @@ class BadClassName(Exception):
pass
class BadClass(object):
class BadClass:
def __init__(self, *args):
self.args = args
......@@ -68,8 +63,8 @@ def find_global(*args):
if cls == 1:
# Not importable
if (isinstance(args, tuple) and len(args) == 2 and
isinstance(args[0], six.string_types) and
isinstance(args[1], six.string_types)):
isinstance(args[0], str) and
isinstance(args[1], str)):
return BadClass(*args)
else:
raise BadClassName(*args)
......@@ -125,7 +120,7 @@ class IPersistentReference(zope.interface.Interface):
@zope.interface.implementer(IPersistentReference)
class PersistentReference(object):
class PersistentReference:
weak = False
oid = database_name = klass = None
......@@ -169,7 +164,7 @@ class PersistentReference(object):
self.weak = True
if not isinstance(self.oid, (bytes, type(None))):
assert isinstance(self.oid, str)
# this happens on Python 3 when all bytes in the oid are < 0x80
# this happens when all bytes in the oid are < 0x80
self.oid = self.oid.encode('ascii')
def __cmp__(self, other):
......@@ -185,8 +180,6 @@ class PersistentReference(object):
"can't reliably compare against different "
"PersistentReferences")
# Python 3 dropped __cmp__
def __eq__(self, other):
return self.__cmp__(other) == 0
......@@ -206,7 +199,7 @@ class PersistentReference(object):
return self.__cmp__(other) >= 0
def __repr__(self):
return "PR(%s %s)" % (id(self), self.data)
return "PR({} {})".format(id(self), self.data)
def __getstate__(self):
raise PicklingError("Can't pickle PersistentReference")
......@@ -221,7 +214,7 @@ class PersistentReference(object):
return data[1][2]
class PersistentReferenceFactory(object):
class PersistentReferenceFactory:
data = None
......@@ -311,7 +304,7 @@ def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
data=newpickle)
class ConflictResolvingStorage(object):
class ConflictResolvingStorage:
"Mix-in class that provides conflict resolution handling for storages"
tryToResolveConflict = tryToResolveConflict
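tryToResolveConflict ultimately calls the _p_resolveConflict(oldState, savedState, newState) hook on the conflicting object's class. For illustration, a hypothetical conflict-aware counter (not part of this diff):

import persistent

class Counter(persistent.Persistent):
    """Toy counter that merges concurrent increments."""

    def __init__(self):
        self.value = 0

    def inc(self, n=1):
        self.value += n

    def _p_resolveConflict(self, old_state, saved_state, new_state):
        # Re-apply both concurrent increments on top of the common
        # ancestor state; returning a state dict resolves the conflict.
        resolved = dict(new_state)
        resolved['value'] = (saved_state['value'] + new_state['value']
                             - old_state['value'])
        return resolved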
......@@ -323,7 +316,7 @@ class ConflictResolvingStorage(object):
self._crs_untransform_record_data = wrapper.untransform_record_data
self._crs_transform_record_data = wrapper.transform_record_data
try:
m = super(ConflictResolvingStorage, self).registerDB
m = super().registerDB
except AttributeError:
pass
else:
......
......@@ -231,7 +231,7 @@ And now we will make a conflict.
>>> tm_A.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ConflictError: database conflict error...
ZODB.POSException.ConflictError: database conflict error...
oops!
......@@ -352,7 +352,7 @@ resolution.
>>> tm_A.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ConflictError: database conflict error...
ZODB.POSException.ConflictError: database conflict error...
>>> tm_A.abort()
Third, note that, even if the persistent object to which the reference refers
......@@ -395,7 +395,7 @@ the situation above.
>>> tm_A.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ConflictError: database conflict error...
ZODB.POSException.ConflictError: database conflict error...
>>> tm_A.abort()
However, the story highlights the kinds of subtle problems that units
......@@ -479,7 +479,7 @@ integrity issues.
>>> ref1 = PersistentReference(b'my_oid')
>>> ref1.oid
'my_oid'
b'my_oid'
>>> print(ref1.klass)
None
>>> print(ref1.database_name)
......@@ -489,7 +489,7 @@ integrity issues.
>>> ref2 = PersistentReference((b'my_oid', 'my_class'))
>>> ref2.oid
'my_oid'
b'my_oid'
>>> ref2.klass
'my_class'
>>> print(ref2.database_name)
......@@ -499,7 +499,7 @@ integrity issues.
>>> ref3 = PersistentReference(['w', (b'my_oid',)])
>>> ref3.oid
'my_oid'
b'my_oid'
>>> print(ref3.klass)
None
>>> print(ref3.database_name)
......@@ -509,7 +509,7 @@ integrity issues.
>>> ref3a = PersistentReference(['w', (b'my_oid', 'other_db')])
>>> ref3a.oid
'my_oid'
b'my_oid'
>>> print(ref3a.klass)
None
>>> ref3a.database_name
......@@ -519,7 +519,7 @@ integrity issues.
>>> ref4 = PersistentReference(['m', ('other_db', b'my_oid', 'my_class')])
>>> ref4.oid
'my_oid'
b'my_oid'
>>> ref4.klass
'my_class'
>>> ref4.database_name
......@@ -529,7 +529,7 @@ integrity issues.
>>> ref5 = PersistentReference(['n', ('other_db', b'my_oid')])
>>> ref5.oid
'my_oid'
b'my_oid'
>>> print(ref5.klass)
None
>>> ref5.database_name
......@@ -539,7 +539,7 @@ integrity issues.
>>> ref6 = PersistentReference([b'my_oid']) # legacy
>>> ref6.oid
'my_oid'
b'my_oid'
>>> print(ref6.klass)
None
>>> print(ref6.database_name)
......
......@@ -13,7 +13,6 @@
##############################################################################
"""Database connection support
"""
from __future__ import print_function
import logging
import os
......@@ -21,8 +20,6 @@ import tempfile
import time
import warnings
import six
import transaction
from persistent import PickleCache
# interfaces
......@@ -86,14 +83,14 @@ def resetCaches():
def className(obj):
cls = type(obj)
return "%s.%s" % (cls.__module__, cls.__name__)
return "{}.{}".format(cls.__module__, cls.__name__)
@implementer(IConnection,
ISavepointDataManager,
IPersistentDataManager,
ISynchronizer)
class Connection(ExportImport, object):
class Connection(ExportImport):
"""Connection to ZODB for loading and storing objects.
Connections manage object state in collaboration with transaction
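A minimal sketch of that collaboration, using the in-memory MappingStorage for brevity:

import transaction
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

db = DB(MappingStorage())
conn = db.open()             # borrow a Connection from the pool
conn.root()['counter'] = 0   # loads and stores go through the connection
transaction.commit()         # the transaction manager drives the commit
conn.close()                 # return the connection to the pool
db.close()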
......@@ -260,14 +257,14 @@ class Connection(ExportImport, object):
def cacheMinimize(self):
"""Deactivate all unmodified objects in the cache.
"""
for connection in six.itervalues(self.connections):
for connection in self.connections.values():
connection._cache.minimize()
# TODO: we should test what happens when cacheGC is called mid-transaction.
def cacheGC(self):
"""Reduce cache size to target size.
"""
for connection in six.itervalues(self.connections):
for connection in self.connections.values():
connection._cache.incrgc()
__onCloseCallbacks = None
......@@ -496,7 +493,7 @@ class Connection(ExportImport, object):
else:
self._commit(transaction)
for oid, serial in six.iteritems(self._readCurrent):
for oid, serial in self._readCurrent.items():
try:
self._storage.checkCurrentSerialInTransaction(
oid, serial, transaction)
......@@ -715,7 +712,7 @@ class Connection(ExportImport, object):
def sortKey(self):
"""Return a consistent sort key for this connection."""
return "%s:%s" % (self._storage.sortKey(), id(self))
return "{}:{}".format(self._storage.sortKey(), id(self))
# Data manager (ISavepointDataManager) methods
##########################################################################
......@@ -939,7 +936,7 @@ class Connection(ExportImport, object):
self._reader._cache = cache
def _release_resources(self):
for c in six.itervalues(self.connections):
for c in self.connections.values():
if c._storage is not None:
c._storage.release()
c._storage = c._normal_storage = None
......@@ -1004,8 +1001,8 @@ class Connection(ExportImport, object):
src = self._storage
# Invalidate objects created *after* the savepoint.
self._invalidate_creating((oid for oid in src.creating
if oid not in state[2]))
self._invalidate_creating(oid for oid in src.creating
if oid not in state[2])
index = src.index
src.reset(*state)
self._cache.invalidate(index)
......@@ -1105,7 +1102,7 @@ class Connection(ExportImport, object):
@implementer(IDataManagerSavepoint)
class Savepoint(object):
class Savepoint:
def __init__(self, datamanager, state):
self.datamanager = datamanager
......@@ -1116,7 +1113,7 @@ class Savepoint(object):
@implementer(IBlobStorage)
class TmpStore(object):
class TmpStore:
"""A storage-like thing to support savepoints."""
def __init__(self, storage):
......@@ -1220,8 +1217,8 @@ class TmpStore(object):
def _getCleanFilename(self, oid, tid):
return os.path.join(
self._getBlobPath(),
"%s-%s%s" % (utils.oid_repr(oid), utils.tid_repr(tid),
SAVEPOINT_SUFFIX,)
"{}-{}{}".format(utils.oid_repr(oid), utils.tid_repr(tid),
SAVEPOINT_SUFFIX)
)
def temporaryDirectory(self):
......@@ -1243,7 +1240,7 @@ class TmpStore(object):
self.creating = creating
class RootConvenience(object):
class RootConvenience:
def __init__(self, root):
self.__dict__['_root'] = root
......@@ -1294,7 +1291,7 @@ size.
"""
class overridable_property(object):
class overridable_property:
"""
Same as property() with only a getter, except that setting a
value overrides the property rather than raising AttributeError.
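That behavior falls out of overridable_property being a non-data descriptor (it defines no __set__), so an instance attribute simply shadows it. A small self-contained sketch (hypothetical, not the exact implementation in this diff):

class OverridableProperty:
    def __init__(self, getter):
        self._getter = getter

    def __get__(self, inst, cls=None):
        if inst is None:
            return self
        return self._getter(inst)
    # No __set__ or __delete__: assigning on an instance shadows us.

class Demo:
    @OverridableProperty
    def size(self):
        return 100   # computed default

d = Demo()
assert d.size == 100
d.size = 42          # overrides instead of raising AttributeError
assert d.size == 42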
......@@ -1309,9 +1306,9 @@ class overridable_property(object):
@implementer(IStorageTransactionMetaData)
class TransactionMetaData(object):
class TransactionMetaData:
def __init__(self, user=u'', description=u'', extension=None):
def __init__(self, user='', description='', extension=None):
if not isinstance(user, bytes):
user = user.encode('utf-8')
self.user = user
......
......@@ -13,7 +13,6 @@
##############################################################################
"""Database objects
"""
from __future__ import print_function
import datetime
import logging
......@@ -23,8 +22,6 @@ import warnings
import weakref
from itertools import chain
import six
import transaction
from persistent.TimeStamp import TimeStamp
from zope.interface import implementer
......@@ -44,7 +41,7 @@ from ZODB.utils import z64
logger = logging.getLogger('ZODB.DB')
class AbstractConnectionPool(object):
class AbstractConnectionPool:
"""Manage a pool of connections.
CAUTION: Methods should be called under the protection of a lock.
......@@ -115,7 +112,7 @@ class AbstractConnectionPool(object):
class ConnectionPool(AbstractConnectionPool):
def __init__(self, size, timeout=1 << 31):
super(ConnectionPool, self).__init__(size, timeout)
super().__init__(size, timeout)
# A stack of connections available to hand out. This is a subset
# of self.all. push() and repush() add to this, and may remove
......@@ -247,7 +244,7 @@ class KeyedConnectionPool(AbstractConnectionPool):
# see the comments in ConnectionPool for method descriptions.
def __init__(self, size, timeout=1 << 31):
super(KeyedConnectionPool, self).__init__(size, timeout)
super().__init__(size, timeout)
self.pools = {}
def __iter__(self):
......@@ -324,7 +321,7 @@ def getTID(at, before):
@implementer(IDatabase)
class DB(object):
class DB:
"""The Object Database
The DB class coordinates the activities of multiple database
......@@ -428,7 +425,7 @@ class DB(object):
self._historical_cache_size_bytes = historical_cache_size_bytes
# Setup storage
if isinstance(storage, six.string_types):
if isinstance(storage, str):
from ZODB import FileStorage # noqa: F401 import unused
storage = ZODB.FileStorage.FileStorage(storage, **storage_args)
elif storage is None:
......@@ -469,7 +466,7 @@ class DB(object):
self.large_record_size = large_record_size
# Make sure we have a root:
with self.transaction(u'initial database creation') as conn:
with self.transaction('initial database creation') as conn:
try:
conn.get(z64)
except KeyError:
......@@ -516,7 +513,7 @@ class DB(object):
for oid, ob in con._cache.items():
module = getattr(ob.__class__, '__module__', '')
module = module and '%s.' % module or ''
c = "%s%s" % (module, ob.__class__.__name__)
c = "{}{}".format(module, ob.__class__.__name__)
if c in detail:
detail[c] += 1
else:
......@@ -571,7 +568,7 @@ class DB(object):
'conn_no': cn,
'oid': oid,
'id': id,
'klass': "%s%s" % (module, ob.__class__.__name__),
'klass': "{}{}".format(module, ob.__class__.__name__),
'rc': (rc(ob) - 3 - (ob._p_changed is not None)
if rc else False),
'state': ob._p_changed,
......@@ -621,7 +618,6 @@ class DB(object):
'ngsize': con._cache.cache_non_ghost_count,
'size': len(con._cache)})
self._connectionMap(f)
# Py3: Simulate Python 2 m.sort() functionality.
return sorted(
m, key=lambda x: (x['connection'], x['ngsize'], x['size']))
......@@ -643,7 +639,7 @@ class DB(object):
@self._connectionMap
def _(conn):
if conn.transaction_manager is not None:
for c in six.itervalues(conn.connections):
for c in conn.connections.values():
# Prevent connections from implicitly starting new
# transactions.
c.explicit_transactions = True
......@@ -743,7 +739,7 @@ class DB(object):
raise ValueError(
'cannot open an historical connection in the future.')
if isinstance(transaction_manager, six.string_types):
if isinstance(transaction_manager, str):
if transaction_manager:
raise TypeError("Versions aren't supported.")
warnings.warn(
......@@ -806,11 +802,11 @@ class DB(object):
d = d[0]
else:
d = ''
d = "%s (%s)" % (d, len(c._cache))
d = "{} ({})".format(d, len(c._cache))
# output UTC time with the standard Z time zone indicator
result.append({
'opened': o and ("%s (%.2fs)" % (
'opened': o and ("{} ({:.2f}s)".format(
time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(o)),
t-o)),
'info': d,
......@@ -960,7 +956,7 @@ class DB(object):
raise NotImplementedError
if txn is None:
txn = transaction.get()
if isinstance(ids, six.string_types):
if isinstance(ids, str):
ids = [ids]
txn.join(TransactionalUndo(self, ids))
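Note that this only joins the undo to the given transaction; nothing is undone until that transaction commits. A hedged sketch of the calling pattern, assuming an undo-capable storage such as FileStorage (path illustrative):

import transaction
from ZODB import DB

db = DB('Data.fs')             # a string opens a FileStorage, as above
record = db.undoLog(0, 1)[0]   # newest undoable transaction
db.undo(record['id'])          # joins the current transaction
transaction.commit()           # the undo takes effect here
db.close()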
......@@ -1013,7 +1009,7 @@ class DB(object):
return conn
class ContextManager(object):
class ContextManager:
"""PEP 343 context manager
"""
......@@ -1041,7 +1037,7 @@ resource_counter_lock = utils.Lock()
resource_counter = 0
class TransactionalUndo(object):
class TransactionalUndo:
def __init__(self, db, tids):
self._db = db
......@@ -1114,7 +1110,7 @@ class TransactionalUndo(object):
# not open yet. Fortunately new_instances of a storage are
# supposed to return the same sort key as the original storage
# did.
return "%s:%s" % (self._db._mvcc_storage.sortKey(), id(self))
return "{}:{}".format(self._db._mvcc_storage.sortKey(), id(self))
def connection(*args, **kw):
......
......@@ -19,7 +19,6 @@ to be layered over a base database.
The base storage must not change.
"""
from __future__ import print_function
import os
import random
......@@ -127,7 +126,7 @@ class DemoStorage(ConflictResolvingStorage):
self._transaction = None
if name is None:
name = 'DemoStorage(%r, %r)' % (base.getName(), changes.getName())
name = f'DemoStorage({base.getName()!r}, {changes.getName()!r})'
self.__name__ = name
self._copy_methods_from_changes(changes)
......@@ -204,10 +203,8 @@ class DemoStorage(ConflictResolvingStorage):
return r
def iterator(self, start=None, end=None):
for t in self.base.iterator(start, end):
yield t
for t in self.changes.iterator(start, end):
yield t
yield from self.base.iterator(start, end)
yield from self.changes.iterator(start, end)
def lastTransaction(self):
t = self.changes.lastTransaction()
......
......@@ -224,13 +224,13 @@ DemoStorage supports Blobs if the changes database supports blobs.
>>> db = DB(storage)
>>> conn = db.open()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 1'
b'state 1'
>>> _ = transaction.begin()
>>> with conn.root()['blob'].open('w') as file:
... _ = file.write(b'state 2')
>>> transaction.commit()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 2'
b'state 2'
>>> storage.temporaryDirectory() == changes.temporaryDirectory()
True
......@@ -245,14 +245,14 @@ It isn't necessary for the base database to support blobs.
>>> db = DB(storage)
>>> conn = db.open()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 2'
b'state 2'
>>> _ = transaction.begin()
>>> conn.root()['blob2'] = ZODB.blob.Blob()
>>> with conn.root()['blob2'].open('w') as file:
... _ = file.write(b'state 1')
>>> with conn.root()['blob2'].open() as fp: fp.read()
'state 1'
b'state 1'
>>> db.close()
......@@ -269,7 +269,7 @@ storage wrapped around it when necessary:
>>> db = DB(storage)
>>> conn = db.open()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 1'
b'state 1'
>>> type(storage.changes).__name__
'BlobStorage'
......@@ -279,7 +279,7 @@ storage wrapped around it when necessary:
... _ = file.write(b'state 2')
>>> transaction.commit()
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 2'
b'state 2'
>>> storage.temporaryDirectory() == storage.changes.temporaryDirectory()
True
......@@ -308,7 +308,7 @@ This works even if we first write a blob rather than read a blob:
'BlobStorage'
>>> with conn.root()['blob'].open() as fp: fp.read()
'state 2'
b'state 2'
>>> storage.temporaryDirectory() == storage.changes.temporaryDirectory()
True
......
......@@ -15,11 +15,9 @@
import logging
import os
from io import BytesIO
from tempfile import TemporaryFile
import six
from ZODB._compat import BytesIO
from ZODB._compat import PersistentPickler
from ZODB._compat import Unpickler
from ZODB._compat import _protocol
......@@ -36,12 +34,12 @@ from ZODB.utils import u64
logger = logging.getLogger('ZODB.ExportImport')
class ExportImport(object):
class ExportImport:
def exportFile(self, oid, f=None, bufsize=64 * 1024):
if f is None:
f = TemporaryFile(prefix="EXP")
elif isinstance(f, six.string_types):
elif isinstance(f, str):
f = open(f, 'w+b')
f.write(b'ZEXP')
oids = [oid]
......@@ -79,7 +77,7 @@ class ExportImport(object):
def importFile(self, f, clue='', customImporters=None):
# This is tricky, because we need to work in a transaction!
if isinstance(f, six.string_types):
if isinstance(f, str):
with open(f, 'rb') as fp:
return self.importFile(fp, clue=clue,
customImporters=customImporters)
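Together, exportFile() and importFile() copy an object graph between databases. A sketch of the round trip (hypothetical setup; exportFile returns a temporary file positioned at its end, hence the seek):

import transaction
from persistent.mapping import PersistentMapping
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

src = DB(MappingStorage()).open()
dst = DB(MappingStorage()).open()

src.root()['tree'] = PersistentMapping({'answer': 42})
transaction.commit()                     # assigns the oid to export

f = src.exportFile(src.root()['tree']._p_oid)
f.seek(0)
dst.root()['tree'] = dst.importFile(f)   # returns the imported copy
transaction.commit()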
......@@ -127,7 +125,7 @@ class ExportImport(object):
if not isinstance(ooid, bytes):
assert isinstance(ooid, str)
# this happens on Python 3 when all bytes in the oid are < 0x80
# this happens when all bytes in the oid are < 0x80
ooid = ooid.encode('ascii')
if ooid in oids:
......@@ -204,7 +202,7 @@ export_end_marker = b'\377'*16
blob_begin_marker = b'\000BLOBSTART'
class Ghost(object):
class Ghost:
__slots__ = ("oid",)
def __init__(self, oid):
......
......@@ -13,7 +13,6 @@
##############################################################################
"""Storage implementation using a log written to a single file.
"""
from __future__ import print_function
import binascii
import contextlib
......@@ -21,11 +20,11 @@ import errno
import logging
import os
import time
from base64 import decodebytes
from base64 import encodebytes
from struct import pack
from struct import unpack
from six import string_types as STRING_TYPES
from persistent.TimeStamp import TimeStamp
from zc.lockfile import LockFile
from zope.interface import alsoProvides
......@@ -34,8 +33,6 @@ from zope.interface import implementer
from ZODB._compat import FILESTORAGE_MAGIC
from ZODB._compat import Pickler
from ZODB._compat import _protocol
from ZODB._compat import decodebytes
from ZODB._compat import encodebytes
from ZODB._compat import loads
from ZODB.BaseStorage import BaseStorage
from ZODB.BaseStorage import DataRecord as _DataRecord
......@@ -275,7 +272,7 @@ class FileStorage(
if not create:
try:
self._file = open(file_name, read_only and 'rb' or 'r+b')
except IOError as exc:
except OSError as exc:
if exc.errno == errno.EFBIG:
# The file is too big to open. Fail visibly.
raise
......@@ -536,7 +533,7 @@ class FileStorage(
except KeyError:
raise POSKeyError(oid)
except TypeError:
raise TypeError("invalid oid %r" % (oid,))
raise TypeError("invalid oid {!r}".format(oid))
load = load_current # Keep load for now for old clients
......@@ -1580,7 +1577,7 @@ def recover(file_name):
file.truncate(npos)
print("Recovered file, lost %s, ended up with %s bytes" % (
print("Recovered file, lost {}, ended up with {} bytes".format(
pos-opos, npos))
......@@ -1776,7 +1773,7 @@ def _truncate(file, name, pos):
try:
i = 0
while 1:
oname = '%s.tr%s' % (name, i)
oname = '{}.tr{}'.format(name, i)
if os.path.exists(oname):
i += 1
else:
......@@ -1802,7 +1799,7 @@ class FileIterator(FileStorageFormatter):
_file = None
def __init__(self, filename, start=None, stop=None, pos=4):
assert isinstance(filename, STRING_TYPES)
assert isinstance(filename, str)
file = open(filename, 'rb')
self._file = file
self._file_name = filename
......@@ -1825,7 +1822,7 @@ class FileIterator(FileStorageFormatter):
def __len__(self):
# Define a bogus __len__() to make the iterator work
# with code like builtin list() and tuple() in Python 2.1.
# with code like builtin list() and tuple().
# There's a lot of C code that expects a sequence to have
# an __len__() but can cope with any sort of mistake in its
# implementation. So just return 0.
......@@ -2084,11 +2081,11 @@ class TransactionRecordIterator(FileStorageFormatter):
class Record(_DataRecord):
def __init__(self, oid, tid, data, prev, pos):
super(Record, self).__init__(oid, tid, data, prev)
super().__init__(oid, tid, data, prev)
self.pos = pos
class UndoSearch(object):
class UndoSearch:
def __init__(self, file, pos, first, last, filter=None):
self.file = file
......@@ -2150,7 +2147,7 @@ class UndoSearch(object):
return d
class FilePool(object):
class FilePool:
closed = False
writing = False
......@@ -2215,8 +2212,7 @@ class FilePool(object):
This is required if they contain data of rolled back transactions.
"""
# Unfortunately, Python 3.x has no API to flush read buffers, and
# the API is ineffective in Python 2 on Mac OS X.
# Unfortunately, Python has no API to flush read buffers.
with self.write_lock():
self.empty()
......
......@@ -86,7 +86,6 @@
import logging
import struct
from ZODB._compat import PY3
from ZODB.POSException import POSKeyError
from ZODB.utils import as_bytes
from ZODB.utils import oid_repr
......@@ -106,8 +105,8 @@ class CorruptedDataError(CorruptedError):
def __str__(self):
if self.oid:
msg = "Error reading oid %s. Found %r" % (oid_repr(self.oid),
self.buf)
msg = "Error reading oid {}. Found {!r}".format(
oid_repr(self.oid), self.buf)
else:
msg = "Error reading unknown oid. Found %r" % self.buf
if self.pos:
......@@ -127,7 +126,7 @@ assert struct.calcsize(DATA_HDR) == DATA_HDR_LEN
logger = logging.getLogger('ZODB.FileStorage.format')
class FileStorageFormatter(object):
class FileStorageFormatter:
"""Mixin class that can read and write the low-level format."""
# subclasses must provide _file
......@@ -243,7 +242,7 @@ def DataHeaderFromString(s):
return DataHeader(*struct.unpack(DATA_HDR, s))
class DataHeader(object):
class DataHeader:
"""Header for a data record."""
__slots__ = ("oid", "tid", "prev", "tloc", "plen", "back")
......@@ -270,12 +269,11 @@ class DataHeader(object):
def TxnHeaderFromString(s):
res = TxnHeader(*struct.unpack(TRANS_HDR, s))
if PY3:
res.status = res.status.decode('ascii')
res.status = res.status.decode('ascii')
return res
class TxnHeader(object):
class TxnHeader:
"""Header for a transaction record."""
__slots__ = ("tid", "tlen", "status", "user", "descr", "ext",
......
from __future__ import print_function
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
......@@ -46,7 +44,7 @@ def fsdump(path, file=None, with_offset=1):
else:
modname, classname = get_pickle_metadata(rec.data)
size = " size=%d" % len(rec.data)
fullclass = "%s.%s" % (modname, classname)
fullclass = "{}.{}".format(modname, classname)
if rec.data_txn:
# It would be nice to print the transaction number
......@@ -65,7 +63,7 @@ def fmt(p64):
return "%016x" % u64(p64)
class Dumper(object):
class Dumper:
"""A very verbose dumper for debugging FileStorage problems."""
# TODO: Should revise this class to use FileStorageFormatter.
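The module-level fsdump() shown above is the usual entry point; Dumper is the lower-level variant. A short usage sketch (the path is illustrative):

from ZODB.FileStorage.fsdump import fsdump

# Print one line per transaction and per data record of an
# existing FileStorage file.
fsdump('Data.fs')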
......
......@@ -11,7 +11,6 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from __future__ import print_function
import ZODB.FileStorage
from ZODB.serialize import get_refs
......@@ -45,7 +44,7 @@ def shorten(s, size=50):
return s[:nleading] + sep + s[-ntrailing:]
class Tracer(object):
class Tracer:
"""Trace all occurrences of a set of oids in a FileStorage.
Create one by passing the path of an existing FileStorage.
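A hedged usage sketch (the driver pattern follows the accompanying fsoids script; the path and oids are illustrative):

from ZODB.FileStorage.fsoids import Tracer

c = Tracer('Data.fs')
c.register_oids(0x0, 0x1)   # oids of interest (assumed int form)
c.run()                     # scan every record in the file
c.report()                  # one line per event touching those oids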
......@@ -208,7 +207,7 @@ class Tracer(object):
elif isinstance(klass, tuple):
ref2name[ref] = klass = "%s.%s" % klass
else:
klass = "%s.%s" % (klass.__module__, klass.__name__)
klass = f"{klass.__module__}.{klass.__name__}"
self._msg(oid, tid, "references", oid_repr(ref), klass,
"at", pos)
......@@ -441,7 +441,7 @@ class FileStoragePacker(FileStorageFormatter):
self._copier = PackCopier(self._tfile, self.index, self.tindex)
ipos, opos = self.copyToPacktime()
except (OSError, IOError):
except OSError:
# most probably ran out of disk space or some other IO error
close_files_remove()
raise # don't succeed silently
......@@ -486,7 +486,7 @@ class FileStoragePacker(FileStorageFormatter):
self.blob_removed.close()
return pos
except (OSError, IOError):
except OSError:
# most probably ran out of disk space or some other IO error
close_files_remove()
if self.locked:
......
......@@ -27,12 +27,6 @@ from ZODB.Connection import TransactionMetaData
checker = renormalizing.RENormalizing([
# Python 3 bytes add a "b".
(re.compile("b('.*?')"), r"\1"),
# Python 3 adds module name to exceptions.
(re.compile("ZODB.POSException.POSKeyError"), r"POSKeyError"),
(re.compile("ZODB.FileStorage.FileStorage.FileStorageQuotaError"),
"FileStorageQuotaError"),
(re.compile('data.fs:[0-9]+'), 'data.fs:<OFFSET>'),
])
......@@ -143,8 +137,9 @@ def pack_with_repeated_blob_records():
>>> db.pack()
>>> conn.sync()
>>> with conn.root()[1].open() as fp: fp.read()
'some data'
>>> with conn.root()[1].open() as fp:
... fp.read()
b'some data'
>>> db.close()
"""
......
......@@ -67,7 +67,7 @@ create
>>> fs.load(b'\0'*8)
Traceback (most recent call last):
...
POSKeyError: 0x00
ZODB.POSException.POSKeyError: 0x00
>>> sorted(os.listdir('blobs'))
['.layout', 'tmp']
......@@ -104,7 +104,7 @@ quota
>>> db = ZODB.DB(fs) # writes object 0
Traceback (most recent call last):
...
FileStorageQuotaError: The storage quota has been exceeded.
ZODB.FileStorage.FileStorage.FileStorageQuotaError: The storage quota has been exceeded.
>>> fs.close()
......@@ -116,9 +116,8 @@ packer
To demonstrate this, we'll create a null packer that just prints
some information about its arguments:
>>> import six
>>> def packer(storage, referencesf, stop, gc):
... six.print_(referencesf, storage is fs, gc, storage.pack_keep_old)
... print(referencesf, storage is fs, gc, storage.pack_keep_old)
>>> ZODB.FileStorage.config_demo_printing_packer = packer
>>> fs = ZODB.config.storageFromString("""
......@@ -141,8 +140,8 @@ packer
>>> def packer_factory(name):
... def packer(storage, referencesf, stop, gc):
... six.print_(repr(name), referencesf, storage is fs,
... gc, storage.pack_keep_old)
... print(repr(name), referencesf, storage is fs,
... gc, storage.pack_keep_old)
... return packer
>>> ZODB.FileStorage.config_demo_printing_packer_factory = packer_factory
......@@ -184,7 +183,7 @@ pack-gc
>>> fs.pack(time.time(), 42, gc=True)
42 True True True
>>> fs.close()
pack-keep-old
......@@ -200,10 +199,10 @@ pack-keep-old
>>> fs.pack(time.time(), 42)
42 True True False
>>> fs.close()
......@@ -33,7 +33,7 @@ import ZODB.utils
ZODB.interfaces.IStorage,
ZODB.interfaces.IStorageIteration,
)
class MappingStorage(object):
class MappingStorage:
"""In-memory storage implementation
Note that this implementation is somewhat naive and inefficient
......@@ -136,8 +136,7 @@ class MappingStorage(object):
# ZODB.interfaces.IStorageIteration
def iterator(self, start=None, end=None):
for transaction_record in self._transactions.values(start, end):
yield transaction_record
yield from self._transactions.values(start, end)
# ZODB.interfaces.IStorage
@ZODB.utils.locked(opened)
......@@ -219,7 +218,7 @@ class MappingStorage(object):
if gc:
# Step 2, GC. A simple sweep+copy
new_data = BTrees.OOBTree.OOBTree()
to_copy = set([ZODB.utils.z64])
to_copy = {ZODB.utils.z64}
while to_copy:
oid = to_copy.pop()
tid_data = self._data.pop(oid)
......@@ -336,7 +335,7 @@ class MappingStorage(object):
"tpc_vote called with wrong transaction")
class TransactionRecord(object):
class TransactionRecord:
status = ' '
......@@ -363,7 +362,7 @@ class TransactionRecord(object):
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
class DataRecord:
"""Abstract base class for iterator protocol"""
version = ''
......
......@@ -26,7 +26,7 @@ from ZODB.utils import readable_tid_repr
def _fmt_undo(oid, reason):
s = reason and (": %s" % reason) or ""
return "Undo error %s%s" % (oid_repr(oid), s)
return "Undo error {}{}".format(oid_repr(oid), s)
def _recon(class_, state):
......@@ -55,7 +55,7 @@ class POSError(Exception):
# the args would then get lost, leading to unprintable exceptions
# and worse. Manually assign to args from the state to be sure
# this doesn't happen.
super(POSError, self).__setstate__(state)
super().__setstate__(state)
self.args = state['args']
......@@ -131,7 +131,7 @@ class ConflictError(POSError, transaction.interfaces.TransientError):
extras.append("serial currently committed %s" %
readable_tid_repr(current))
if extras:
return "%s (%s)" % (self.message, ", ".join(extras))
return "{} ({})".format(self.message, ", ".join(extras))
else:
return self.message
......@@ -262,8 +262,8 @@ class DanglingReferenceError(
self.missing = Boid
def __str__(self):
return "from %s to %s" % (oid_repr(self.referer),
oid_repr(self.missing))
return "from {} to {}".format(oid_repr(self.referer),
oid_repr(self.missing))
############################################################################
......
......@@ -14,7 +14,7 @@
"""Provide backward compatibility with storages that only have undoLog()."""
class UndoLogCompatible(object):
class UndoLogCompatible:
def undoInfo(self, first=0, last=-20, specification=None):
if specification:
......
......@@ -11,70 +11,45 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import sys
from six import PY3
# We can't use stdlib's pickle because of http://bugs.python.org/issue6784
import zodbpickle.pickle
from zodbpickle import binary # noqa: F401 import unused
_protocol = 3
FILESTORAGE_MAGIC = b"FS30"
HIGHEST_PROTOCOL = 3
IS_JYTHON = sys.platform.startswith('java')
_protocol = 3
class Pickler(zodbpickle.pickle.Pickler):
def __init__(self, f, protocol=None):
super().__init__(f, protocol)
class Unpickler(zodbpickle.pickle.Unpickler):
def __init__(self, f):
super().__init__(f)
# Python doesn't allow assignments to find_global,
# instead, find_class can be overridden
find_global = None
def find_class(self, modulename, name):
if self.find_global is None:
return super().find_class(modulename, name)
return self.find_global(modulename, name)
def dump(o, f, protocol=None):
return zodbpickle.pickle.dump(o, f, protocol)
def dumps(o, protocol=None):
return zodbpickle.pickle.dumps(o, protocol)
if not PY3:
# Python 2.x
# PyPy's cPickle doesn't have noload, and noload is broken in Python 2.7,
# so we need zodbpickle.
# Get the fastest working version we can (PyPy has no fastpickle)
try:
import zodbpickle.fastpickle as cPickle
except ImportError:
import zodbpickle.pickle as cPickle
Pickler = cPickle.Pickler
Unpickler = cPickle.Unpickler
dump = cPickle.dump
dumps = cPickle.dumps
loads = cPickle.loads
HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL
IMPORT_MAPPING = {}
NAME_MAPPING = {}
FILESTORAGE_MAGIC = b"FS21"
else:
# Python 3.x: can't use stdlib's pickle because
# http://bugs.python.org/issue6784
import zodbpickle.pickle
HIGHEST_PROTOCOL = 3
from _compat_pickle import IMPORT_MAPPING # noqa: F401 import unused
from _compat_pickle import NAME_MAPPING # noqa: F401 import unused
class Pickler(zodbpickle.pickle.Pickler):
def __init__(self, f, protocol=None):
super(Pickler, self).__init__(f, protocol)
class Unpickler(zodbpickle.pickle.Unpickler):
def __init__(self, f):
super(Unpickler, self).__init__(f)
# Py3: Python 3 doesn't allow assignments to find_global,
# instead, find_class can be overridden
find_global = None
def find_class(self, modulename, name):
if self.find_global is None:
return super(Unpickler, self).find_class(modulename, name)
return self.find_global(modulename, name)
def dump(o, f, protocol=None):
return zodbpickle.pickle.dump(o, f, protocol)
def dumps(o, protocol=None):
return zodbpickle.pickle.dumps(o, protocol)
def loads(s):
return zodbpickle.pickle.loads(s, encoding='ASCII', errors='bytes')
FILESTORAGE_MAGIC = b"FS30"
def loads(s):
return zodbpickle.pickle.loads(s, encoding='ASCII', errors='bytes')
def PersistentPickler(persistent_id, *args, **kwargs):
......@@ -83,15 +58,13 @@ def PersistentPickler(persistent_id, *args, **kwargs):
to get persistent IDs. The remainder of the arguments are passed to the
Pickler itself.
This covers the differences between Python 2 and 3 and PyPy/zodbpickle.
This covers the differences between CPython and PyPy/zodbpickle.
"""
p = Pickler(*args, **kwargs)
if not PY3:
p.inst_persistent_id = persistent_id
# PyPy uses a python implementation of cPickle/zodbpickle in both Python 2
# and Python 3. We can't really detect inst_persistent_id as its
# a magic attribute that's not readable, but it doesn't hurt to
# PyPy uses a python implementation of cPickle/zodbpickle.
# We can't really detect `inst_persistent_id` as it is
# a magic attribute that is not readable, but it doesn't hurt to
# simply always assign to persistent_id also
p.persistent_id = persistent_id
return p
......@@ -103,7 +76,7 @@ def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
to locate classes, and the given `load_persistent` function to load
objects from a persistent id.
This covers the differences between Python 2 and 3 and PyPy/zodbpickle.
This covers the differences between CPython and PyPy/zodbpickle.
"""
unpickler = Unpickler(*args, **kwargs)
if find_global is not None:
......@@ -119,56 +92,7 @@ def PersistentUnpickler(find_global, load_persistent, *args, **kwargs):
return unpickler
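A minimal round trip through these helpers; the persistent_id function here is illustrative (returning None pickles everything inline):

from io import BytesIO

from ZODB._compat import PersistentPickler
from ZODB._compat import PersistentUnpickler
from ZODB._compat import _protocol

buf = BytesIO()

def persistent_id(obj):
    # A real storage returns a reference for persistent objects;
    # None means "pickle the object inline".
    return None

PersistentPickler(persistent_id, buf, _protocol).dump({'answer': 42})
buf.seek(0)
assert PersistentUnpickler(None, None, buf).load() == {'answer': 42}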
try:
# XXX: why not just import BytesIO from io?
from cStringIO import StringIO as BytesIO
except ImportError:
# Python 3.x
from io import BytesIO # noqa: F401 import unused
try:
# Python 3.x
from base64 import decodebytes
from base64 import encodebytes
except ImportError:
# Python 2.x
from base64 import decodestring as decodebytes
from base64 import encodestring as encodebytes
# I want to use 'noqa: F401 import unused' on the lines above, for flake8,
# but isort removes them. So mention the two imported functions here,
# so neither flake8 nor isort complains.
decodebytes, encodebytes
# Python 3.x: ``hasattr()`` swallows only AttributeError.
def py2_hasattr(obj, name):
try:
getattr(obj, name)
except: # noqa: E722 do not use bare 'except'
return False
return True
try:
# Py2: simply reexport the builtin
long = long
except NameError:
# Py3
long = int
INT_TYPES = (int,)
else:
INT_TYPES = (int, long)
try:
TEXT = unicode
except NameError: # pragma NO COVER Py3k
TEXT = str
def ascii_bytes(x):
if isinstance(x, TEXT):
if isinstance(x, str):
x = x.encode('ascii')
return x
......@@ -23,25 +23,21 @@ import stat
import sys
import tempfile
import weakref
from base64 import decodebytes
from io import BytesIO
from io import FileIO
import persistent
import zope.interface
import ZODB.interfaces
from ZODB import utils
from ZODB._compat import PY3
from ZODB._compat import BytesIO
from ZODB._compat import PersistentUnpickler
from ZODB._compat import ascii_bytes
from ZODB._compat import decodebytes
from ZODB.interfaces import BlobError
from ZODB.POSException import POSKeyError
if PY3:
from io import FileIO as file
logger = logging.getLogger('ZODB.blob')
BLOB_SUFFIX = ".blob"
......@@ -102,7 +98,7 @@ class Blob(persistent.Persistent):
# Only ghostify if we are unopened.
if self.readers or self.writers:
return
super(Blob, self)._p_deactivate()
super()._p_deactivate()
def _p_invalidate(self):
# Force-close any open readers or writers,
......@@ -117,7 +113,7 @@ class Blob(persistent.Persistent):
if (self._p_blob_uncommitted):
os.remove(self._p_blob_uncommitted)
super(Blob, self)._p_invalidate()
super()._p_invalidate()
def opened(self):
return bool(self.readers or self.writers)
......@@ -316,7 +312,7 @@ class Blob(persistent.Persistent):
return filename
class BlobFile(file):
class BlobFile(FileIO):
"""A BlobFile that holds a file handle to actual blob data.
It is a file that can be used within a transaction boundary; a BlobFile is
......@@ -330,21 +326,18 @@ class BlobFile(file):
# the storage later puts them to avoid copying them ...
def __init__(self, name, mode, blob):
super(BlobFile, self).__init__(name, mode+'b')
super().__init__(name, mode+'b')
self.blob = blob
def close(self):
self.blob.closed(self)
super(BlobFile, self).close()
super().close()
def __reduce__(self):
# Python 3 cannot pickle an open file with any pickle protocol
# Python cannot pickle an open file with any pickle protocol
# because of the underlying _io.BufferedReader/Writer object.
# Python 2 cannot pickle a file with a protocol < 2, but
# protocol 2 *can* pickle an open file; the result of unpickling
# is a closed file object.
# It's pointless to do that with a blob, so we make sure to
# prohibit it on all versions.
# prohibit it.
raise TypeError("Pickling a BlobFile is not allowed")
......@@ -352,11 +345,11 @@ _pid = str(os.getpid())
def log(msg, level=logging.INFO, subsys=_pid, exc_info=False):
message = "(%s) %s" % (subsys, msg)
message = "({}) {}".format(subsys, msg)
logger.log(level, message, exc_info=exc_info)
class FilesystemHelper(object):
class FilesystemHelper:
# Storages that implement IBlobStorage can choose to use this
# helper class to generate and parse blob filenames. This is not
# a set-in-stone interface for all filesystem operations dealing
......@@ -391,7 +384,7 @@ class FilesystemHelper(object):
with open(layout_marker_path, 'w') as layout_marker:
layout_marker.write(self.layout_name)
else:
with open(layout_marker_path, 'r') as layout_marker:
with open(layout_marker_path) as layout_marker:
layout = layout_marker.read().strip()
if layout != self.layout_name:
raise ValueError(
......@@ -530,7 +523,7 @@ class FilesystemHelper(object):
yield oid, path
class NoBlobsFileSystemHelper(object):
class NoBlobsFileSystemHelper:
@property
def temp_dir(self):
......@@ -548,7 +541,7 @@ def auto_layout_select(path):
# use.
layout_marker = os.path.join(path, LAYOUT_MARKER)
if os.path.exists(layout_marker):
with open(layout_marker, 'r') as fp:
with open(layout_marker) as fp:
layout = fp.read().strip()
log('Blob directory `%s` has layout marker set. '
'Selected `%s` layout. ' % (path, layout), level=logging.DEBUG)
......@@ -574,7 +567,7 @@ def auto_layout_select(path):
return layout
class BushyLayout(object):
class BushyLayout:
"""A bushy directory layout for blob directories.
Creates an 8-level directory structure (one level per byte) in
......@@ -617,7 +610,7 @@ class BushyLayout(object):
"""
oid_path = self.oid_to_path(oid)
filename = "%s%s" % (utils.tid_repr(tid), BLOB_SUFFIX)
filename = "{}{}".format(utils.tid_repr(tid), BLOB_SUFFIX)
return os.path.join(oid_path, filename)
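oid_to_path() fans each of the eight oid bytes out into one directory level. A sketch of the resulting shape (the exact directory-name format is an assumption, it is not shown in this diff):

import binascii

def bushy_path(oid):
    # One directory per oid byte, hex-encoded (assumed naming).
    return '/'.join('0x' + binascii.hexlify(oid[i:i + 1]).decode()
                    for i in range(8))

print(bushy_path(b'\x00\x00\x00\x00\x00\x00\x00\x01'))
# -> 0x00/0x00/0x00/0x00/0x00/0x00/0x00/0x01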
......@@ -648,7 +641,7 @@ class LawnLayout(BushyLayout):
LAYOUTS['lawn'] = LawnLayout()
class BlobStorageMixin(object):
class BlobStorageMixin:
"""A mix-in to help storages support blobs."""
def _blob_init(self, blob_dir, layout='automatic'):
......@@ -678,7 +671,7 @@ class BlobStorageMixin(object):
def registerDB(self, db):
self.__untransform_record_data = db.untransform_record_data
try:
m = super(BlobStorageMixin, self).registerDB
m = super().registerDB
except AttributeError:
pass
else:
......@@ -772,8 +765,8 @@ class BlobStorage(BlobStorageMixin):
def __repr__(self):
normal_storage = self.__storage
return '<BlobStorage proxy for %r at %s>' % (normal_storage,
hex(id(self)))
return '<BlobStorage proxy for {!r} at {}>'.format(normal_storage,
hex(id(self)))
def tpc_finish(self, *arg, **kw):
# We need to override the base storage's tpc_finish instead of
......
......@@ -18,17 +18,17 @@ import sys
import persistent
import zope.interface
from _compat_pickle import IMPORT_MAPPING
from _compat_pickle import NAME_MAPPING
import ZODB.interfaces
from ZODB._compat import IMPORT_MAPPING
from ZODB._compat import NAME_MAPPING
broken_cache = {}
@zope.interface.implementer(ZODB.interfaces.IBroken)
class Broken(object):
class Broken:
"""Broken object base class
Broken objects are placeholders for objects that can no longer be
......@@ -66,7 +66,7 @@ class Broken(object):
>>> a.x = 1
Traceback (most recent call last):
...
BrokenModified: Can't change broken objects
ZODB.broken.BrokenModified: Can't change broken objects
But you can set their state::
......@@ -126,7 +126,7 @@ class Broken(object):
self.__dict__['__Broken_state__'] = state
def __repr__(self):
return "<broken %s.%s instance>" % (
return "<broken {}.{} instance>".format(
self.__class__.__module__, self.__class__.__name__)
def __setattr__(self, name, value):
......@@ -298,7 +298,7 @@ class PersistentBroken(Broken, persistent.Persistent):
>>> a.x = 1
Traceback (most recent call last):
...
BrokenModified: Can't change broken objects
ZODB.broken.BrokenModified: Can't change broken objects
Unlike regular broken objects, persistent broken objects keep
track of persistence meta data:
......@@ -312,7 +312,7 @@ class PersistentBroken(Broken, persistent.Persistent):
>>> a.__reduce__() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
BrokenModified:
ZODB.broken.BrokenModified:
<persistent broken not.there.Atall instance '\x00\x00\x00\x00****'>
but you can get their state:
......@@ -345,7 +345,7 @@ class PersistentBroken(Broken, persistent.Persistent):
raise BrokenModified("Can't change broken objects")
def __repr__(self):
return "<persistent broken %s.%s instance %r>" % (
return "<persistent broken {}.{} instance {!r}>".format(
self.__class__.__module__, self.__class__.__name__,
self._p_oid)
......
......@@ -13,19 +13,13 @@
##############################################################################
"""Open database and storage from a configuration."""
import os
from io import StringIO
import ZConfig
import ZODB
try:
from cStringIO import StringIO
except ImportError:
# Py3
from io import StringIO
db_schema_path = os.path.join(ZODB.__path__[0], "config.xml")
_db_schema = None
......@@ -117,7 +111,7 @@ def storageFromConfig(section):
return section.open()
class BaseConfig(object):
class BaseConfig:
"""Object representing a configured storage or database.
Methods:
......
......@@ -15,7 +15,7 @@
import persistent.mapping
class fixer(object):
class fixer:
def __of__(self, parent):
def __setstate__(state, self=parent):
self._container = state
......@@ -26,7 +26,7 @@ class fixer(object):
fixer = fixer()
class hack(object):
class hack:
pass
......
......@@ -61,7 +61,7 @@ It isn't valid to create references outside a multi database:
>>> tm.commit() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
InvalidObjectReference:
ZODB.POSException.InvalidObjectReference:
('Attempt to store an object from a foreign database connection',
<ZODB.Connection.Connection object at ...>,
<ZODB.tests.testcrossdatabasereferences.MyClass...>)
......@@ -89,7 +89,7 @@ reachable from multiple databases:
>>> tm.commit() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
InvalidObjectReference:
ZODB.POSException.InvalidObjectReference:
("A new object is reachable from multiple databases. Won't try to
guess which one was correct!",
<ZODB.Connection.Connection object at ...>,
......@@ -117,7 +117,7 @@ This doesn't work with a savepoint:
>>> tm.commit() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
InvalidObjectReference:
ZODB.POSException.InvalidObjectReference:
("A new object is reachable from multiple databases. Won't try to guess
which one was correct!",
<ZODB.Connection.Connection object at ...>,
......@@ -165,7 +165,7 @@ the other way around.
>>> transaction.commit() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
InvalidObjectReference:
ZODB.POSException.InvalidObjectReference:
("Database '2' doesn't allow implicit cross-database references",
<ZODB.Connection.Connection object at ...>,
{'x': {}})
......
......@@ -40,12 +40,9 @@
# integers.
import struct
import six
from BTrees.fsBTree import fsBucket
from BTrees.OOBTree import OOBTree
from ZODB._compat import INT_TYPES
from ZODB._compat import Pickler
from ZODB._compat import Unpickler
from ZODB._compat import _protocol
......@@ -72,11 +69,11 @@ def prefix_minus_one(s):
def ensure_bytes(s):
# on Python 3 we might pickle bytes and unpickle unicode strings
return s.encode('ascii') if not isinstance(s, bytes) else s
# we might pickle bytes and unpickle str objects
return s if isinstance(s, bytes) else s.encode('ascii')
class fsIndex(object):
class fsIndex:
def __init__(self, data=None):
self._data = OOBTree()
......@@ -87,7 +84,7 @@ class fsIndex(object):
return dict(
state_version=1,
_data=[(k, v.toString())
for (k, v) in six.iteritems(self._data)
for (k, v) in self._data.items()
]
)
......@@ -118,7 +115,7 @@ class fsIndex(object):
pickler = Pickler(f, _protocol)
pickler.fast = True
pickler.dump(pos)
for k, v in six.iteritems(self._data):
for k, v in self._data.items():
pickler.dump((k, v.toString()))
pickler.dump(None)
......@@ -127,9 +124,9 @@ class fsIndex(object):
with open(fname, 'rb') as f:
unpickler = Unpickler(f)
pos = unpickler.load()
if not isinstance(pos, INT_TYPES):
if not isinstance(pos, int):
# NB: this might contain OIDs that got unpickled
# into Unicode strings on Python 3; hope the caller
# into str objects; hope the caller
# will pipe the result to fsIndex().update() to normalize
# the keys
return pos # Old format
......@@ -175,7 +172,7 @@ class fsIndex(object):
def __len__(self):
r = 0
for tree in six.itervalues(self._data):
for tree in self._data.values():
r += len(tree)
return r
......@@ -201,7 +198,7 @@ class fsIndex(object):
self._data.clear()
def __iter__(self):
for prefix, tree in six.iteritems(self._data):
for prefix, tree in self._data.items():
for suffix in tree:
yield prefix + suffix
......@@ -211,16 +208,16 @@ class fsIndex(object):
return list(self.iterkeys())
def iteritems(self):
for prefix, tree in six.iteritems(self._data):
for suffix, value in six.iteritems(tree):
for prefix, tree in self._data.items():
for suffix, value in tree.items():
yield (prefix + suffix, str2num(value))
def items(self):
return list(self.iteritems())
def itervalues(self):
for tree in six.itervalues(self._data):
for value in six.itervalues(tree):
for tree in self._data.values():
for value in tree.values():
yield str2num(value)
def values(self):
......
......@@ -51,7 +51,6 @@ Options:
Important: The ZODB package must be importable. You may need to adjust
PYTHONPATH accordingly.
"""
from __future__ import print_function
import getopt
import os
......@@ -322,14 +321,14 @@ def recover(inp, outp, verbose=0, partial=False, force=False, pack=None):
t = TimeStamp(tid)
if t <= _ts:
if ok:
print(("Time stamps out of order %s, %s" % (_ts, t)))
print("Time stamps out of order {}, {}".format(_ts, t))
ok = 0
_ts = t.laterThan(_ts)
tid = _ts.raw()
else:
_ts = t
if not ok:
print(("Time stamps back in order %s" % (t)))
print("Time stamps back in order %s" % (t))
ok = 1
ofs.tpc_begin(txn, tid, txn.status)
......
......@@ -30,7 +30,7 @@ from ZODB.FileStorage.format import TRANS_HDR_LEN
from ZODB.utils import u64
class TxnHeader(object):
class TxnHeader:
"""Object representing a transaction record header.
Attribute Position Value
......@@ -104,7 +104,7 @@ class TxnHeader(object):
return TxnHeader(self._file, self._pos - (tlen + 8))
class DataHeader(object):
class DataHeader:
"""Object representing a data record header.
Attribute Position Value
......
......@@ -100,7 +100,7 @@ Moreover, the historical connection cannot commit changes.
>>> transaction1.commit()
Traceback (most recent call last):
...
ReadOnlyHistoryError
ZODB.POSException.ReadOnlyHistoryError
>>> transaction1.abort()
>>> historical_conn.root()['first']['count']
0
......
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) Zope Corporation and Contributors.
......
# -*- coding: utf-8 -*-
"""Adapt IStorage objects to IMVCCStorage
This is a largely internal implementation of ZODB, especially DB and
......@@ -19,7 +18,7 @@ from .utils import tid_repr
from .utils import u64
class Base(object):
class Base:
_copy_methods = (
'getName', 'getSize', 'history', 'lastTransaction', 'sortKey',
......
......@@ -42,7 +42,7 @@ $Id$
# - We'll use _p_changed is None to indicate that we're in this state.
#
class _p_DataDescr(object):
class _p_DataDescr:
# Descr used as base for _p_ data. Data are stored in
# _p_class_dict.
......@@ -84,7 +84,7 @@ class _p_oid_or_jar_Descr(_p_DataDescr):
jar.setstate(inst)
class _p_ChangedDescr(object):
class _p_ChangedDescr:
# descriptor to handle special weird semantics of _p_changed
def __get__(self, inst, cls):
......@@ -101,7 +101,7 @@ class _p_ChangedDescr(object):
inst._p_invalidate()
class _p_MethodDescr(object):
class _p_MethodDescr:
"""Provide unassignable class attributes
"""
......@@ -131,10 +131,11 @@ class PersistentMetaClass(type):
_p_serial = _p_DataDescr('_p_serial')
def __new__(self, name, bases, cdict, _p_changed=False):
cdict = dict([(k, v) for (k, v) in cdict.items()
if not k.startswith('_p_')])
cdict = {k: v
for (k, v) in cdict.items()
if not k.startswith('_p_')}
cdict['_p_class_dict'] = {'_p_changed': _p_changed}
return super(PersistentMetaClass, self).__new__(
return super().__new__(
self, name, bases, cdict)
def __getnewargs__(self):
......@@ -158,14 +159,14 @@ class PersistentMetaClass(type):
data_manager.register(self)
def __setattr__(self, name, v):
if not ((name.startswith('_p_') or name.startswith('_v'))):
if not (name.startswith('_p_') or name.startswith('_v')):
self._p_maybeupdate(name)
super(PersistentMetaClass, self).__setattr__(name, v)
super().__setattr__(name, v)
def __delattr__(self, name):
if not ((name.startswith('_p_') or name.startswith('_v'))):
if not (name.startswith('_p_') or name.startswith('_v')):
self._p_maybeupdate(name)
super(PersistentMetaClass, self).__delattr__(name)
super().__delattr__(name)
def _p_deactivate(self):
# persistent classes can't be ghosts
......@@ -182,13 +183,11 @@ class PersistentMetaClass(type):
def __getstate__(self):
return (self.__bases__,
dict([(k, v) for (k, v) in self.__dict__.items()
if not (k.startswith('_p_')
or k.startswith('_v_')
or k in special_class_descrs
)
]),
)
{k: v
for (k, v) in self.__dict__.items()
if not (k.startswith('_p_')
or k.startswith('_v_')
or k in special_class_descrs)})
__getstate__ = _p_MethodDescr(__getstate__)
......@@ -197,8 +196,9 @@ class PersistentMetaClass(type):
if self.__bases__ != bases:
# __getnewargs__ should've taken care of that
raise AssertionError(self.__bases__, '!=', bases)
cdict = dict([(k, v) for (k, v) in cdict.items()
if not k.startswith('_p_')])
cdict = {k: v
for (k, v) in cdict.items()
if not k.startswith('_p_')}
_p_class_dict = self._p_class_dict
self._p_class_dict = {}
......
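Two idioms recur throughout the persistentclass.py cleanup above: dict() over a list comprehension becomes a dict comprehension, and two-argument super(Class, self) becomes the zero-argument Python 3 form. Both in a toy example:

    class Base:
        def __setattr__(self, name, value):
            # was: super(Base, self).__setattr__(name, value)
            super().__setattr__(name, value)

    cdict = {'_p_oid': 1, 'name': 'bob'}
    # was: dict([(k, v) for (k, v) in cdict.items() if ...])
    cdict = {k: v for (k, v) in cdict.items() if not k.startswith('_p_')}
    print(cdict)   # {'name': 'bob'}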
......@@ -66,9 +66,8 @@ We can create and use instances of the class:
We can modify the class and none of the persistent attributes will
change because the object hasn't been saved.
>>> import six
>>> def bar(self):
... six.print_('bar', self.name)
... print('bar', self.name)
>>> C.bar = bar
>>> c.bar()
bar first
......@@ -93,7 +92,7 @@ Now, if we look at the persistence variables, we'll see that they have
values:
>>> C._p_oid
'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
>>> C._p_jar is not None
True
>>> C._p_serial is not None
......@@ -104,7 +103,7 @@ values:
Now, if we modify the class:
>>> def baz(self):
... six.print_('baz', self.name)
... print('baz', self.name)
>>> C.baz = baz
>>> c.baz()
baz first
......@@ -229,7 +228,7 @@ Now, if we try to load it, we get a broken object:
>>> connection2.sync()
>>> connection2.root()['obs']['p']
<persistent broken __zodb__.P instance '\x00\x00\x00\x00\x00\x00\x00\x04'>
<persistent broken __zodb__.P instance b'\x00\x00\x00\x00\x00\x00\x00\x04'>
because the module, `__zodb__` can't be loaded. We need to provide a
class factory that knows about this special module. Here we'll supply a
......
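The remaining edits in this doctest only add b prefixes: oids are byte strings, and Python 3 reprs spell that out where Python 2 did not. For instance:

    oid = b'\x00' * 7 + b'\x01'
    print(repr(oid))   # b'\x00\x00\x00\x00\x00\x00\x00\x01'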
#!/usr/bin/env python
# Based on a transaction analyzer by Matt Kromer.
from __future__ import print_function
import sys
from io import BytesIO
from ZODB._compat import BytesIO
from ZODB._compat import PersistentUnpickler
from ZODB.FileStorage import FileStorage
......@@ -26,7 +25,7 @@ def FakeUnpickler(f):
return unpickler
class Report(object):
class Report:
def __init__(self):
self.OIDMAP = {}
self.TYPEMAP = {}
......@@ -58,10 +57,10 @@ def shorten(s, n):
def report(rep):
print("Processed %d records in %d transactions" % (rep.OIDS, rep.TIDS))
print("Average record size is %7.2f bytes" % (rep.DBYTES * 1.0 / rep.OIDS))
print(("Average transaction size is %7.2f bytes" %
(rep.DBYTES * 1.0 / rep.TIDS)))
print(f"Processed {rep.OIDS} records in {rep.TIDS} transactions")
print(f"Average record size is {rep.DBYTES * 1.0 / rep.OIDS:7.2f} bytes")
print("Average transaction size is"
f" {rep.DBYTES * 1.0 / rep.TIDS:7.2f} bytes")
print("Types used:")
fmt = "%-46s %7s %9s %6s %7s"
......@@ -112,11 +111,11 @@ def get_type(record):
try:
unpickled = FakeUnpickler(BytesIO(record.data)).load()
except FakeError as err:
return "%s.%s" % (err.module, err.name)
return "{}.{}".format(err.module, err.name)
classinfo = unpickled[0]
if isinstance(classinfo, tuple):
mod, klass = classinfo
return "%s.%s" % (mod, klass)
return "{}.{}".format(mod, klass)
else:
return str(classinfo)
......
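The report() rewrite above shows the commit's string-formatting policy: percent formatting wrapped in redundant parentheses becomes an f-string (format specs such as :7.2f carry over unchanged), while simple '%s.%s' pairs become str.format. With toy values, the old and new spellings print identically:

    OIDS, DBYTES = 1234, 567890
    print("Average record size is %7.2f bytes" % (DBYTES * 1.0 / OIDS))
    print(f"Average record size is {DBYTES * 1.0 / OIDS:7.2f} bytes")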
......@@ -6,7 +6,6 @@ usage: checkbtrees.py data.fs
Try to find all the BTrees in a Data.fs, call their _check() methods,
and run them through BTrees.check.check().
"""
from __future__ import print_function
from BTrees.check import check
......@@ -116,9 +115,9 @@ def main(fname=None):
for k, v in get_subobjects(obj):
if k.startswith('['):
# getitem
newpath = "%s%s" % (path, k)
newpath = "{}{}".format(path, k)
else:
newpath = "%s.%s" % (path, k)
newpath = "{}.{}".format(path, k)
add_if_new_persistent(todo, v, newpath)
print("total", len(fs._index), "found", found)
......
......@@ -37,7 +37,6 @@ is not recommended (spurious error messages may result).
See testfsoids.py for a tutorial doctest.
"""
from __future__ import print_function
import sys
......
......@@ -61,7 +61,6 @@ of objects, it does not attempt to load objects in versions, or non-current
revisions of objects; therefore fsrefs cannot find problems in versions or
in non-current revisions.
"""
from __future__ import print_function
import traceback
......@@ -90,15 +89,15 @@ def report(oid, data, serial, missing):
else:
plural = ""
ts = TimeStamp(serial)
print("oid %s %s.%s" % (hex(u64(oid)), from_mod, from_class))
print("last updated: %s, tid=%s" % (ts, hex(u64(serial))))
print("oid {} {}.{}".format(hex(u64(oid)), from_mod, from_class))
print("last updated: {}, tid={}".format(ts, hex(u64(serial))))
print("refers to invalid object%s:" % plural)
for oid, info, reason in missing:
if isinstance(info, tuple):
description = "%s.%s" % info
else:
description = str(info)
print("\toid %s %s: %r" % (oid_repr(oid), reason, description))
print("\toid {} {}: {!r}".format(oid_repr(oid), reason, description))
print()
......
#!/usr/bin/env python2
"""Print details statistics from fsdump output."""
from __future__ import print_function
import re
import sys
import six
from six.moves import filter
rx_txn = re.compile(r"tid=([0-9a-f]+).*size=(\d+)")
rx_data = re.compile(r"oid=([0-9a-f]+) size=(\d+) class=(\S+)")
......@@ -27,10 +23,10 @@ class Histogram(dict):
self[size] = self.get(size, 0) + 1
def size(self):
return sum(six.itervalues(self))
return sum(self.values())
def mean(self):
product = sum([k * v for k, v in six.iteritems(self)])
product = sum([k * v for k, v in self.items()])
return product / self.size()
def median(self):
......@@ -47,7 +43,7 @@ class Histogram(dict):
def mode(self):
mode = 0
value = 0
for k, v in six.iteritems(self):
for k, v in self.items():
if v > value:
value = v
mode = k
......@@ -55,12 +51,12 @@ class Histogram(dict):
def make_bins(self, binsize):
try:
maxkey = max(six.iterkeys(self))
maxkey = max(self.keys())
except ValueError:
maxkey = 0
self.binsize = binsize
self.bins = [0] * (1 + maxkey // binsize)
for k, v in six.iteritems(self):
for k, v in self.items():
b = k // binsize
self.bins[b] += v
......@@ -105,12 +101,12 @@ def class_detail(class_size):
labels = ["num", "median", "mean", "mode", "class"]
print(fmt % tuple(labels))
print(fmt % tuple(["-" * len(s) for s in labels]))
for klass, h in sort_byhsize(six.iteritems(class_size)):
for klass, h in sort_byhsize(class_size.items()):
print(fmt % (h.size(), h.median(), h.mean(), h.mode(), klass))
print()
# per class details
for klass, h in sort_byhsize(six.iteritems(class_size), reverse=True):
for klass, h in sort_byhsize(class_size.items(), reverse=True):
h.make_bins(50)
if len(tuple(filter(None, h.bins))) == 1:
continue
......@@ -119,7 +115,7 @@ def class_detail(class_size):
def revision_detail(lifetimes, classes):
# Report per-class details for any object modified more than once
for name, oids in six.iteritems(classes):
for name, oids in classes.items():
h = Histogram()
keep = False
for oid in dict.fromkeys(oids, 1):
......@@ -148,7 +144,7 @@ def main(path=None):
objects = 0
tid = None
f = open(path, "r")
f = open(path)
for i, line in enumerate(f):
if MAX and i > MAX:
break
......
......@@ -13,21 +13,15 @@
#
##############################################################################
"""Tool to dump the last few transactions from a FileStorage."""
from __future__ import print_function
import binascii
import getopt
import sys
from hashlib import sha1
from ZODB.fstools import prev_txn
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
def main(path, ntxn):
with open(path, "rb") as f:
f.seek(0, 2)
......@@ -36,10 +30,10 @@ def main(path, ntxn):
while th and i > 0:
hash = sha1(th.get_raw_data()).digest()
th.read_meta()
print("%s: hash=%s" % (th.get_timestamp(),
binascii.hexlify(hash).decode()))
print(("user=%r description=%r length=%d offset=%d (+%d)"
% (th.user, th.descr, th.length, th.get_offset(), len(th))))
print("{}: hash={}".format(th.get_timestamp(),
binascii.hexlify(hash).decode()))
print("user=%r description=%r length=%d offset=%d (+%d)"
% (th.user, th.descr, th.length, th.get_offset(), len(th)))
print()
th = th.prev_txn()
i -= 1
......
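With Python 2 gone, the guarded import of sha collapses: hashlib has provided sha1 since Python 2.5, so fstail can import it unconditionally. The digest-then-hexlify step the script performs, in isolation:

    import binascii
    from hashlib import sha1

    digest = sha1(b'raw transaction bytes').digest()
    print(binascii.hexlify(digest).decode())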
......@@ -30,7 +30,6 @@ Note: It does not check the consistency of the object pickles. It is
possible for the damage to occur only in the part of the file that
stores object pickles. Those errors will go undetected.
"""
from __future__ import print_function
import binascii
import struct
......@@ -48,7 +47,7 @@ class FormatError(ValueError):
"""There is a problem with the format of the FileStorage."""
class Status(object):
class Status:
checkpoint = b'c'
undone = b'u'
......@@ -115,11 +114,11 @@ def check_trec(path, file, pos, ltid, file_size):
used for generating error messages.
"""
h = file.read(TREC_HDR_LEN) # XXX must be bytes under Py3k
h = file.read(TREC_HDR_LEN) # XXX must be bytes
if not h:
return None, None
if len(h) != TREC_HDR_LEN:
raise FormatError("%s truncated at %s" % (path, pos))
raise FormatError("{} truncated at {}".format(path, pos))
tid, stl, status, ul, dl, el = struct.unpack(">8s8scHHH", h)
tmeta_len = TREC_HDR_LEN + ul + dl + el
......@@ -177,7 +176,7 @@ def check_drec(path, file, pos, tpos, tid):
h = file.read(DREC_HDR_LEN)
if len(h) != DREC_HDR_LEN:
raise FormatError("%s truncated at %s" % (path, pos))
raise FormatError("{} truncated at {}".format(path, pos))
oid, serial, _prev, _tloc, vlen, _plen = (
struct.unpack(">8s8s8s8sH8s", h))
U64(_prev)
......
......@@ -72,7 +72,6 @@ Positional arguments:
Comma separated list of arguments for the source storage, as key=val
pairs. E.g. "name=full;frequency=3600"
"""
from __future__ import print_function
import getopt
import marshal
......@@ -114,7 +113,7 @@ def main():
except getopt.error as msg:
error(2, msg)
class Options(object):
class Options:
stype = 'FileStorage'
dtype = 'FileStorage'
verbose = 0
......@@ -249,7 +248,7 @@ def doit(srcdb, dstdb, options):
t = TimeStamp(tid)
if t <= ts:
if ok:
print('Time stamps are out of order %s, %s' % (ts, t),
print('Time stamps are out of order {}, {}'.format(ts, t),
file=sys.stderr)
ok = False
ts = t.laterThan(ts)
......@@ -331,7 +330,7 @@ def doit(srcdb, dstdb, options):
# helper to deal with differences between old-style store() return and
# new-style store() return that supports ZEO
class RevidAccumulator(object):
class RevidAccumulator:
def __init__(self):
self.data = {}
......
......@@ -13,7 +13,6 @@
##############################################################################
"""A script to migrate a blob directory into a different layout.
"""
from __future__ import print_function
import logging
import optparse
......@@ -42,7 +41,7 @@ def migrate(source, dest, layout):
source_fsh.create()
dest_fsh = FilesystemHelper(dest, layout)
dest_fsh.create()
print("Migrating blob data from `%s` (%s) to `%s` (%s)" % (
print("Migrating blob data from `{}` ({}) to `{}` ({})".format(
source, source_fsh.layout_name, dest, dest_fsh.layout_name))
for oid, path in source_fsh.listOIDs():
dest_path = dest_fsh.getPathForOID(oid, create=True)
......@@ -51,7 +50,7 @@ def migrate(source, dest, layout):
source_file = os.path.join(path, file)
dest_file = os.path.join(dest_path, file)
link_or_copy(source_file, dest_file)
print("\tOID: %s - %s files " % (oid_repr(oid), len(files)))
print("\tOID: {} - {} files ".format(oid_repr(oid), len(files)))
def main(source=None, dest=None, layout="bushy"):
......
......@@ -6,9 +6,7 @@ usage: netspace.py [-P | -v] data.fs
-P: do a pack first
-v: print info for all objects, even if a traversal path isn't found
"""
from __future__ import print_function
from six.moves import filter
import ZODB
from ZODB.FileStorage import FileStorage
......@@ -49,7 +47,7 @@ def find_paths(root, maxdist):
continue
for k, v in items:
oid = getattr(v, '_p_oid', None)
objs.append(("%s.%s" % (path, k), v, oid, dist + 1))
objs.append(("{}.{}".format(path, k), v, oid, dist + 1))
return paths
......
......@@ -84,7 +84,6 @@ Options for -V/--verify:
-Q / --quick
Verify file sizes only (skip md5 checksums).
"""
from __future__ import print_function
import errno
import getopt
......@@ -96,8 +95,6 @@ import sys
import time
from hashlib import md5
from six.moves import filter
from ZODB.FileStorage import FileStorage
......@@ -128,7 +125,7 @@ class VerificationFail(RepozoError):
pass
class _GzipCloser(object):
class _GzipCloser:
def __init__(self, fqn, mode):
self._opened = gzip.open(fqn, mode)
......@@ -184,7 +181,7 @@ def parseargs(argv):
except getopt.error as msg:
usage(1, msg)
class Options(object):
class Options:
mode = None # BACKUP, RECOVER or VERIFY
file = None # name of input Data.fs file
repository = None # name of directory holding backups
......@@ -473,7 +470,7 @@ def scandat(repofiles):
fn = startpos = endpos = sum = None # assume .dat file missing or empty
try:
fp = open(datfile)
except IOError as e:
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
......@@ -681,7 +678,7 @@ def do_recover(options):
repofiles = find_files(options)
if not repofiles:
if options.date:
raise NoFiles('No files in repository before %s' % (options.date,))
raise NoFiles(f'No files in repository before {options.date}')
else:
raise NoFiles('No files in repository')
......@@ -725,7 +722,7 @@ def do_recover(options):
repofile, reposz, expected_truth['size']))
if reposum != expected_truth['sum']:
raise VerificationFail(
"%s has checksum %s instead of %s" % (
"{} has checksum {} instead of {}".format(
repofile, reposum, expected_truth['sum']))
totalsz += reposz
log("Recovered chunk %s : %s bytes, md5: %s",
......@@ -782,7 +779,7 @@ def do_verify(options):
actual_sum, size = get_checksum_and_size_of_file(
filename, options.quick)
when_uncompressed = ''
except IOError:
except OSError:
error("%s is missing", filename)
continue
if size != expected_size:
......
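The IOError to OSError substitutions in repozo are safe because PEP 3151 (Python 3.3) merged the two exception types: IOError survives only as an alias, and OSError is the canonical spelling. A minimal check, assuming the probed path does not exist:

    print(IOError is OSError)   # True on Python 3.3+

    try:
        open('/nonexistent/path')
    except OSError as e:        # also catches anything once spelled IOError
        print(e.errno)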
......@@ -7,9 +7,6 @@ The current implementation only supports FileStorage.
Current limitations / simplifications: Ignores revisions and versions.
"""
from __future__ import print_function
import six
from ZODB.FileStorage import FileStorage
from ZODB.utils import U64
......@@ -21,14 +18,14 @@ def run(path, v=0):
fs = FileStorage(path, read_only=1)
# break into the file implementation
if hasattr(fs._index, 'iterkeys'):
iter = six.iterkeys(fs._index)
iter = fs._index.keys()
else:
iter = fs._index.keys()
totals = {}
for oid in iter:
data, serialno = load_current(fs, oid)
mod, klass = get_pickle_metadata(data)
key = "%s.%s" % (mod, klass)
key = "{}.{}".format(mod, klass)
bytes, count = totals.get(key, (0, 0))
bytes += len(data)
count += 1
......
......@@ -21,7 +21,7 @@ from ZODB.tests.util import run_module_as_script
class FsdumpFsstatsTests(TestCase):
def setUp(self):
super(FsdumpFsstatsTests, self).setUp()
super().setUp()
# create (empty) storage ``data.fs``
DB("data.fs").close()
......
......@@ -12,11 +12,9 @@
#
##############################################################################
import doctest
import re
import unittest
from zope.testing import setupstack
from zope.testing.renormalizing import RENormalizing
import ZODB
......@@ -45,12 +43,8 @@ def test_fstest_verbose():
def test_suite():
checker = RENormalizing([
# Python 3 drops the u'' prefix on unicode strings
(re.compile(r"u('[^']*')"), r"\1"),
])
return unittest.TestSuite([
doctest.DocTestSuite('ZODB.scripts.fstest', checker=checker),
doctest.DocTestSuite('ZODB.scripts.fstest'),
doctest.DocTestSuite(setUp=setupstack.setUpDirectory,
tearDown=setupstack.tearDown),
])
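The deleted RENormalizing checker existed only to paper over Python 2's u'' reprs. On Python 3, str is unicode, the u prefix is a vestigial no-op (kept legal by PEP 414), and reprs never show it, so the doctest output needs no rewriting:

    print(u'abc' == 'abc')   # True: the u prefix changes nothing on Python 3
    print(repr(u'abc'))      # 'abc', with no u prefix in the repr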
......@@ -11,7 +11,6 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import print_function
import os
import sys
......@@ -23,12 +22,6 @@ from io import StringIO
import ZODB.tests.util # layer used at class scope
if str is bytes:
NativeStringIO = BytesIO
else:
NativeStringIO = StringIO
_NOISY = os.environ.get('NOISY_REPOZO_TEST_OUTPUT')
......@@ -43,7 +36,7 @@ def _read_file(name, mode='rb'):
return f.read()
class OurDB(object):
class OurDB:
_file_name = None
......@@ -105,7 +98,7 @@ class Test_parseargs(unittest.TestCase):
self._old_verbosity = repozo.VERBOSE
self._old_stderr = sys.stderr
repozo.VERBOSE = False
sys.stderr = NativeStringIO()
sys.stderr = StringIO()
def tearDown(self):
from ZODB.scripts import repozo
......@@ -134,7 +127,7 @@ class Test_parseargs(unittest.TestCase):
# zope.testrunner will happily print the traceback and failure message
# into our StringIO before running our tearDown.
old_stdout = sys.stdout
sys.stdout = NativeStringIO()
sys.stdout = StringIO()
try:
self.assertRaises(SystemExit, repozo.parseargs, ['--help'])
self.assertIn('Usage:', sys.stdout.getvalue())
......@@ -251,7 +244,7 @@ class Test_parseargs(unittest.TestCase):
sys.stderr.getvalue())
class FileopsBase(object):
class FileopsBase:
def _makeChunks(self):
from ZODB.scripts.repozo import READCHUNK
......@@ -325,7 +318,7 @@ class Test_checksum(unittest.TestCase, FileopsBase):
self.assertEqual(sum, md5(b'x' * 42).hexdigest())
class OptionsTestBase(object):
class OptionsTestBase:
_repository_directory = None
_data_directory = None
......@@ -342,7 +335,7 @@ class OptionsTestBase(object):
import tempfile
self._repository_directory = tempfile.mkdtemp(prefix='test-repozo-')
class Options(object):
class Options:
repository = self._repository_directory
date = None
......@@ -421,7 +414,7 @@ class Test_concat(OptionsTestBase, unittest.TestCase):
def test_w_ofp(self):
class Faux(object):
class Faux:
_closed = False
def __init__(self):
......@@ -598,7 +591,7 @@ class Test_scandat(OptionsTestBase, unittest.TestCase):
class Test_delete_old_backups(OptionsTestBase, unittest.TestCase):
def _makeOptions(self, filenames=()):
options = super(Test_delete_old_backups, self)._makeOptions()
options = super()._makeOptions()
for filename in filenames:
fqn = os.path.join(options.repository, filename)
_write_file(fqn, b'testing delete_old_backups')
......@@ -1258,22 +1251,23 @@ class MonteCarloTests(unittest.TestCase):
def test_suite():
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
return unittest.TestSuite([
unittest.makeSuite(Test_parseargs),
unittest.makeSuite(Test_dofile),
unittest.makeSuite(Test_checksum),
unittest.makeSuite(Test_copyfile),
unittest.makeSuite(Test_concat),
unittest.makeSuite(Test_gen_filename),
unittest.makeSuite(Test_find_files),
unittest.makeSuite(Test_scandat),
unittest.makeSuite(Test_delete_old_backups),
unittest.makeSuite(Test_do_full_backup),
unittest.makeSuite(Test_do_incremental_backup),
loadTestsFromTestCase(Test_parseargs),
loadTestsFromTestCase(Test_dofile),
loadTestsFromTestCase(Test_checksum),
loadTestsFromTestCase(Test_copyfile),
loadTestsFromTestCase(Test_concat),
loadTestsFromTestCase(Test_gen_filename),
loadTestsFromTestCase(Test_find_files),
loadTestsFromTestCase(Test_scandat),
loadTestsFromTestCase(Test_delete_old_backups),
loadTestsFromTestCase(Test_do_full_backup),
loadTestsFromTestCase(Test_do_incremental_backup),
# unittest.makeSuite(Test_do_backup), #TODO
unittest.makeSuite(Test_do_recover),
unittest.makeSuite(Test_do_verify),
loadTestsFromTestCase(Test_do_recover),
loadTestsFromTestCase(Test_do_verify),
# N.B.: this test take forever to run (~40sec on a fast laptop),
# *and* it is non-deterministic.
unittest.makeSuite(MonteCarloTests),
loadTestsFromTestCase(MonteCarloTests),
])
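unittest.makeSuite() is deprecated since Python 3.11 and slated for removal, hence the switch to the loader method; binding loadTestsFromTestCase to a local name keeps the suite definition compact. The equivalence, sketched with a throwaway TestCase:

    import unittest

    class T(unittest.TestCase):
        def test_ok(self):
            self.assertTrue(True)

    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
    suite = unittest.TestSuite([loadTestsFromTestCase(T)])
    unittest.TextTestRunner().run(suite)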
......@@ -112,7 +112,6 @@ Usage: loadmail2 [options]
are equivalent
"""
from __future__ import print_function
import mailbox
import math
......@@ -126,7 +125,7 @@ import time
import transaction
class JobProducer(object):
class JobProducer:
def __init__(self):
self.jobs = []
......@@ -145,7 +144,7 @@ class JobProducer(object):
return not not self.jobs
class MBox(object):
class MBox:
def __init__(self, filename):
if ' ' in filename:
......@@ -254,7 +253,7 @@ def setup(lib_python):
PLexicon('lex', '', Splitter(), CaseNormalizer())
)
class extra(object):
class extra:
doc_attr = 'PrincipiaSearchSource'
lexicon_id = 'lex'
index_type = 'Okapi BM25 Rank'
......@@ -319,7 +318,7 @@ def run1(tid, db, factory, job, args):
(start, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit, r
) = do(db, job, args)
start = "%.4d-%.2d-%.2d %.2d:%.2d:%.2d" % time.localtime(start)[:6]
print("%s %s %8.3g %8.3g %s %s\t%8.3g %8.3g %s %r" % (
print("{} {} {:8.3g} {:8.3g} {} {}\t{:8.3g} {:8.3g} {} {!r}".format(
start, tid, wcomp, ccomp, rconflicts, wconflicts, wcommit, ccommit,
factory.__name__, r))
......@@ -382,13 +381,13 @@ def index(connection, messages, catalog, max):
return message.number
class IndexJob(object):
class IndexJob:
needs_mbox = 1
catalog = 1
prefix = 'index'
def __init__(self, mbox, number=1, max=0):
self.__name__ = "%s%s_%s" % (self.prefix, number, mbox.__name__)
self.__name__ = "{}{}_{}".format(self.prefix, number, mbox.__name__)
self.mbox, self.number, self.max = mbox, int(number), int(max)
def create(self):
......@@ -459,13 +458,13 @@ def edit(connection, mbox, catalog=1):
return norig, ndel, nins
class EditJob(object):
class EditJob:
needs_mbox = 1
prefix = 'edit'
catalog = 1
def __init__(self, mbox):
self.__name__ = "%s_%s" % (self.prefix, mbox.__name__)
self.__name__ = "{}_{}".format(self.prefix, mbox.__name__)
self.mbox = mbox
def create(self):
......@@ -497,7 +496,7 @@ def search(connection, terms, number):
return n
class SearchJob(object):
class SearchJob:
def __init__(self, terms='', number=10):
......
......@@ -134,17 +134,17 @@ A number of legacy formats are defined:
"""
import logging
from io import BytesIO
from persistent import Persistent
from persistent.wref import WeakRef
from persistent.wref import WeakRefMarker
from zodbpickle import binary
from ZODB import broken
from ZODB._compat import BytesIO
from ZODB._compat import PersistentPickler
from ZODB._compat import PersistentUnpickler
from ZODB._compat import _protocol
from ZODB._compat import binary
from ZODB.POSException import InvalidObjectReference
......@@ -165,7 +165,7 @@ def myhasattr(obj, name, _marker=object()):
return getattr(obj, name, _marker) is not _marker
class ObjectWriter(object):
class ObjectWriter:
"""Serializes objects for storage in the database.
The ObjectWriter creates object pickles in the ZODB format. It
......@@ -210,12 +210,11 @@ class ObjectWriter(object):
>>> bob = P('bob')
>>> oid, cls = writer.persistent_id(bob)
>>> oid
'42'
b'42'
>>> cls is P
True
To work with Python 3, the oid in the persistent id is of the
zodbpickle binary type:
The oid in the persistent id is of the zodbpickle binary type:
>>> oid.__class__ is binary
True
......@@ -225,7 +224,7 @@ class ObjectWriter(object):
these will be assigned by persistent_id():
>>> bob._p_oid
'42'
b'42'
>>> bob._p_jar is jar
True
......@@ -234,7 +233,7 @@ class ObjectWriter(object):
>>> bob._p_oid = b'24'
>>> oid, cls = writer.persistent_id(bob)
>>> oid
'24'
b'24'
>>> cls is P
True
......@@ -245,7 +244,7 @@ class ObjectWriter(object):
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
InvalidObjectReference:
ZODB.POSException.InvalidObjectReference:
('Attempt to store an object from a foreign database connection',
<ZODB.serialize.DummyJar ...>, P(bob))
......@@ -260,9 +259,9 @@ class ObjectWriter(object):
>>> sam = PNewArgs('sam')
>>> writer.persistent_id(sam)
'42'
b'42'
>>> sam._p_oid
'42'
b'42'
>>> sam._p_jar is jar
True
......@@ -448,7 +447,7 @@ class ObjectWriter(object):
return NewObjectIterator(self._stack)
class NewObjectIterator(object):
class NewObjectIterator:
# The pickler is used as a forward iterator when the connection
# is looking for new objects to pickle.
......@@ -469,7 +468,7 @@ class NewObjectIterator(object):
next = __next__
class ObjectReader(object):
class ObjectReader:
def __init__(self, conn=None, cache=None, factory=None):
self._conn = conn
......@@ -514,7 +513,7 @@ class ObjectReader(object):
if not isinstance(oid, bytes):
assert isinstance(oid, str)
# this happens on Python 3 when all bytes in the oid are < 0x80
# this happens when all bytes in the oid are < 0x80
oid = oid.encode('ascii')
obj = self._cache.get(oid, None)
......@@ -552,7 +551,7 @@ class ObjectReader(object):
def load_persistent_weakref(self, oid, database_name=None):
if not isinstance(oid, bytes):
assert isinstance(oid, str)
# this happens on Python 3 when all bytes in the oid are < 0x80
# this happens when all bytes in the oid are < 0x80
oid = oid.encode('ascii')
obj = WeakRef.__new__(WeakRef)
obj.oid = oid
......@@ -574,7 +573,7 @@ class ObjectReader(object):
def load_oid(self, oid):
if not isinstance(oid, bytes):
assert isinstance(oid, str)
# this happens on Python 3 when all bytes in the oid are < 0x80
# this happens when all bytes in the oid are < 0x80
oid = oid.encode('ascii')
obj = self._cache.get(oid, None)
if obj is not None:
......@@ -597,7 +596,7 @@ class ObjectReader(object):
if isinstance(klass, tuple):
# old style reference
return "%s.%s" % klass
return "%s.%s" % (klass.__module__, klass.__name__)
return "{}.{}".format(klass.__module__, klass.__name__)
def getGhost(self, pickle):
unpickler = self._get_unpickler(pickle)
......@@ -672,7 +671,7 @@ def referencesf(p, oids=None):
if not isinstance(oid, bytes):
assert isinstance(oid, str)
# this happens on Python 3 when all bytes in the oid are < 0x80
# this happens when all bytes in the oid are < 0x80
oid = oid.encode('ascii')
oids.append(oid)
......@@ -714,7 +713,7 @@ def get_refs(a_pickle):
if not isinstance(oid, bytes):
assert isinstance(oid, str)
# this happens on Python 3 when all bytes in the oid are < 0x80
# this happens when all bytes in the oid are < 0x80
oid = oid.encode('ascii')
result.append((oid, klass))
......
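With the _compat shims gone, serialize.py takes BytesIO from io and binary from zodbpickle directly. The binary type is a bytes subclass that keeps oids round-tripping through the pickler as bytes; a small sketch, assuming zodbpickle is installed:

    from zodbpickle import binary

    oid = binary(b'\x00' * 7 + b'\x01')
    print(isinstance(oid, bytes))   # True: binary subclasses bytes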
......@@ -36,7 +36,7 @@ from .. import utils
class BasicStorage(RaceTests):
def checkBasics(self):
def testBasics(self):
self.assertEqual(self._storage.lastTransaction(), ZERO)
t = TransactionMetaData()
......@@ -63,7 +63,7 @@ class BasicStorage(RaceTests):
self._storage.tpc_vote, TransactionMetaData())
self._storage.tpc_abort(t)
def checkSerialIsNoneForInitialRevision(self):
def testSerialIsNoneForInitialRevision(self):
eq = self.assertEqual
oid = self._storage.new_oid()
txn = TransactionMetaData()
......@@ -79,13 +79,13 @@ class BasicStorage(RaceTests):
eq(value, MinPO(11))
eq(revid, newrevid)
def checkStore(self):
def testStore(self):
revid = ZERO
newrevid = self._dostore(revid=None)
# Finish the transaction.
self.assertNotEqual(newrevid, revid)
def checkStoreAndLoad(self):
def testStoreAndLoad(self):
eq = self.assertEqual
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(7))
......@@ -99,7 +99,7 @@ class BasicStorage(RaceTests):
data, revid = utils.load_current(self._storage, oid)
eq(zodb_unpickle(data), MinPO(21))
def checkConflicts(self):
def testConflicts(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
self._dostore(oid, revid=revid1, data=MinPO(12))
......@@ -107,7 +107,7 @@ class BasicStorage(RaceTests):
self._dostore,
oid, revid=revid1, data=MinPO(13))
def checkWriteAfterAbort(self):
def testWriteAfterAbort(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
......@@ -118,7 +118,7 @@ class BasicStorage(RaceTests):
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(6))
def checkAbortAfterVote(self):
def testAbortAfterVote(self):
oid1 = self._storage.new_oid()
revid1 = self._dostore(oid=oid1, data=MinPO(-2))
oid = self._storage.new_oid()
......@@ -136,7 +136,7 @@ class BasicStorage(RaceTests):
data, _revid = utils.load_current(self._storage, oid)
self.assertEqual(revid, _revid)
def checkStoreTwoObjects(self):
def testStoreTwoObjects(self):
noteq = self.assertNotEqual
p31, p32, p51, p52 = map(MinPO, (31, 32, 51, 52))
oid1 = self._storage.new_oid()
......@@ -149,7 +149,7 @@ class BasicStorage(RaceTests):
revid4 = self._dostore(oid2, revid=revid2, data=p52)
noteq(revid3, revid4)
def checkGetTid(self):
def testGetTid(self):
if not hasattr(self._storage, 'getTid'):
return
eq = self.assertEqual
......@@ -163,7 +163,7 @@ class BasicStorage(RaceTests):
revid2 = self._dostore(oid, revid=revid1, data=p42)
eq(revid2, self._storage.getTid(oid))
def checkLen(self):
def testLen(self):
# len(storage) reports the number of objects.
# check it is zero when empty
self.assertEqual(len(self._storage), 0)
......@@ -174,27 +174,27 @@ class BasicStorage(RaceTests):
self._dostore(data=MinPO(23))
self.assertTrue(len(self._storage) in [0, 2])
def checkGetSize(self):
def testGetSize(self):
self._dostore(data=MinPO(25))
size = self._storage.getSize()
# The storage API doesn't make any claims about what size
# means except that it ought to be printable.
str(size)
def checkNote(self):
def testNote(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
t.note(u'this is a test')
t.note('this is a test')
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def checkInterfaces(self):
def testInterfaces(self):
for iface in zope.interface.providedBy(self._storage):
zope.interface.verify.verifyObject(iface, self._storage)
def checkMultipleEmptyTransactions(self):
def testMultipleEmptyTransactions(self):
# There was a bug in handling empty transactions in mapping
# storage that caused the commit lock not to be released. :(
t = TransactionMetaData()
......@@ -216,7 +216,7 @@ class BasicStorage(RaceTests):
thread.join(.1)
return thread
def check_checkCurrentSerialInTransaction(self):
def test_checkCurrentSerialInTransaction(self):
oid = b'\0\0\0\0\0\0\0\xf0'
tid = self._dostore(oid)
tid2 = self._dostore(oid, revid=tid)
......@@ -297,7 +297,7 @@ class BasicStorage(RaceTests):
tid4 >
utils.load_current(self._storage, b'\0\0\0\0\0\0\0\xf4')[1])
def check_tid_ordering_w_commit(self):
def test_tid_ordering_w_commit(self):
# It's important that storages always give a consistent
# ordering for revisions, tids. This is most likely to fail
......
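The wholesale check* to test* renames matter for collection: unittest's default loader only picks up methods matching its testMethodPrefix ('test'), whereas the old check prefix required building suites with an explicit prefix argument. The discovery rule in miniature:

    import unittest

    class Demo(unittest.TestCase):
        def check_old_style(self):   # invisible to the default loader
            pass
        def test_new_style(self):    # collected
            pass

    loader = unittest.defaultTestLoader
    print([t.id() for t in loader.loadTestsFromTestCase(Demo)])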
......@@ -61,7 +61,7 @@ class PCounter4(PCounter):
raise RuntimeError("Can't get here; not enough args")
class ConflictResolvingStorage(object):
class ConflictResolvingStorage:
def checkResolve(self, resolvable=True):
db = DB(self._storage)
......@@ -93,15 +93,15 @@ class ConflictResolvingStorage(object):
db.close()
def checkUnresolvable(self):
def testUnresolvable(self):
self.checkResolve(False)
def checkZClassesArentResolved(self):
def testZClassesArentResolved(self):
from ZODB.ConflictResolution import BadClassName
from ZODB.ConflictResolution import find_global
self.assertRaises(BadClassName, find_global, '*foobar', ())
def checkBuggyResolve1(self):
def testBuggyResolve1(self):
obj = PCounter3()
obj.inc()
......@@ -119,7 +119,7 @@ class ConflictResolvingStorage(object):
self._dostoreNP,
oid, revid=revid1, data=zodb_pickle(obj))
def checkBuggyResolve2(self):
def testBuggyResolve2(self):
obj = PCounter4()
obj.inc()
......@@ -138,9 +138,9 @@ class ConflictResolvingStorage(object):
oid, revid=revid1, data=zodb_pickle(obj))
class ConflictResolvingTransUndoStorage(object):
class ConflictResolvingTransUndoStorage:
def checkUndoConflictResolution(self):
def testUndoConflictResolution(self):
# This test is based on testNotUndoable in the
# TransactionalUndoStorage test suite. Except here, conflict
# resolution should allow us to undo the transaction anyway.
......@@ -162,7 +162,7 @@ class ConflictResolvingTransUndoStorage(object):
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def checkUndoUnresolvable(self):
def testUndoUnresolvable(self):
# This test is based on testNotUndoable in the
# TransactionalUndoStorage test suite. Except here, conflict
# resolution should allow us to undo the transaction anyway.
......
......@@ -42,7 +42,7 @@ class FileStorageCorruptTests(StorageTestBase):
data, s_revid = load_current(self._storage, oid)
self.assertEqual(s_revid, revid)
def checkTruncatedIndex(self):
def testTruncatedIndex(self):
oids = self._do_stores()
self._close()
......@@ -58,7 +58,7 @@ class FileStorageCorruptTests(StorageTestBase):
self._storage = ZODB.FileStorage.FileStorage('Data.fs')
self._check_stores(oids)
def checkCorruptedIndex(self):
def testCorruptedIndex(self):
oids = self._do_stores()
self._close()
......
......@@ -24,8 +24,8 @@ from time import time
from ZODB.tests.MinPO import MinPO
class HistoryStorage(object):
def checkSimpleHistory(self):
class HistoryStorage:
def testSimpleHistory(self):
self._checkHistory((11, 12, 13))
def _checkHistory(self, data):
......
......@@ -57,12 +57,12 @@ Now if we try to load data for the objects, we get a POSKeyError:
>>> storage.load(oid0, '') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSKeyError: ...
ZODB.POSException.POSKeyError: ...
>>> storage.load(oid1, '') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSKeyError: ...
ZODB.POSException.POSKeyError: ...
We can still get the data if we load before the time we deleted.
......@@ -85,27 +85,27 @@ gone:
>>> storage.load(oid0, '') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSKeyError: ...
ZODB.POSException.POSKeyError: ...
>>> storage.load(oid1, '') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSKeyError: ...
ZODB.POSException.POSKeyError: ...
>>> storage.loadBefore(oid0, conn.root()._p_serial) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSKeyError: ...
ZODB.POSException.POSKeyError: ...
>>> storage.loadBefore(oid1, conn.root()._p_serial) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSKeyError: ...
ZODB.POSException.POSKeyError: ...
>>> storage.loadBlob(oid1, s1) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
POSKeyError: ...
ZODB.POSException.POSKeyError: ...
A conflict error is raised if the serial we provide to deleteObject
isn't current:
......@@ -123,7 +123,7 @@ isn't current:
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ConflictError: database conflict error ...
ZODB.POSException.ConflictError: database conflict error ...
>>> storage.tpc_abort(txn)
......
......@@ -29,14 +29,7 @@ from ZODB.utils import load_current
from ZODB.utils import p64
try:
from itertools import izip as zip
except ImportError:
# Py3: zip() already returns an iterable.
pass
class IteratorCompare(object):
class IteratorCompare:
def iter_verify(self, txniter, revids, val0):
eq = self.assertEqual
......@@ -58,7 +51,7 @@ class IteratorStorage(IteratorCompare):
# the task to (de)serialize extension data.
use_extension_bytes = False
def checkSimpleIteration(self):
def testSimpleIteration(self):
# Store a bunch of revisions of a single object
self._oid = oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
......@@ -68,7 +61,7 @@ class IteratorStorage(IteratorCompare):
txniter = self._storage.iterator()
self.iter_verify(txniter, [revid1, revid2, revid3], 11)
def checkUndoZombie(self):
def testUndoZombie(self):
oid = self._storage.new_oid()
self._dostore(oid, data=MinPO(94))
# Get the undo information
......@@ -93,7 +86,7 @@ class IteratorStorage(IteratorCompare):
self.assertEqual(rec.oid, oid)
self.assertEqual(rec.data, None)
def checkTransactionExtensionFromIterator(self):
def testTransactionExtensionFromIterator(self):
# It will be deserialized into a simple dict, which will be serialized
# differently. This simulates that 'dumps(loads(x), ...)' does not
# always return x.
......@@ -117,7 +110,7 @@ class IteratorStorage(IteratorCompare):
else:
self.assertNotEqual(extension_bytes, txn.extension_bytes)
def checkIterationIntraTransaction(self):
def testIterationIntraTransaction(self):
# TODO: Try this test with logging enabled. If you see something
# like
#
......@@ -140,7 +133,7 @@ class IteratorStorage(IteratorCompare):
finally:
self._storage.tpc_finish(t)
def checkLoad_was_checkLoadEx(self):
def testLoad_was_checkLoadEx(self):
oid = self._storage.new_oid()
self._dostore(oid, data=42)
data, tid = load_current(self._storage, oid)
......@@ -154,14 +147,14 @@ class IteratorStorage(IteratorCompare):
if not match:
self.fail("Could not find transaction with matching id")
def checkIterateRepeatedly(self):
def testIterateRepeatedly(self):
self._dostore()
transactions = self._storage.iterator()
self.assertEqual(1, len(list(transactions)))
# The iterator can only be consumed once:
self.assertEqual(0, len(list(transactions)))
def checkIterateRecordsRepeatedly(self):
def testIterateRecordsRepeatedly(self):
self._dostore()
it = self._storage.iterator()
tinfo = next(it)
......@@ -170,7 +163,7 @@ class IteratorStorage(IteratorCompare):
if hasattr(it, 'close'):
it.close()
def checkIterateWhileWriting(self):
def testIterateWhileWriting(self):
self._dostore()
iterator = self._storage.iterator()
# We have one transaction with 1 modified object.
......@@ -186,7 +179,7 @@ class IteratorStorage(IteratorCompare):
class ExtendedIteratorStorage(IteratorCompare):
def checkExtendedIteration(self):
def testExtendedIteration(self):
# Store a bunch of revisions of a single object
self._oid = oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
......@@ -226,7 +219,7 @@ class ExtendedIteratorStorage(IteratorCompare):
self.iter_verify(txniter, [revid3], 13)
class IteratorDeepCompare(object):
class IteratorDeepCompare:
def compare(self, storage1, storage2):
eq = self.assertEqual
......
......@@ -3,8 +3,6 @@ import sys
import threading
import time
import six
import transaction
from persistent.mapping import PersistentMapping
......@@ -44,8 +42,7 @@ class TestThread(threading.Thread):
def join(self, timeout=None):
threading.Thread.join(self, timeout)
if self._exc_info:
raise six.reraise(
self._exc_info[0], self._exc_info[1], self._exc_info[2])
raise self._exc_info[1]
class ZODBClientThread(TestThread):
......@@ -218,7 +215,7 @@ class ExtStorageClientThread(StorageClientThread):
pass
class MTStorage(object):
class MTStorage:
"Test a storage with multiple client threads executing concurrently."
def _checkNThreads(self, n, constructor, *args):
......@@ -231,21 +228,21 @@ class MTStorage(object):
self.assertFalse(t.is_alive(),
"thread failed to finish in 60 seconds")
def check2ZODBThreads(self):
def test2ZODBThreads(self):
db = ZODB.DB(self._storage)
self._checkNThreads(2, ZODBClientThread, db, self)
db.close()
def check7ZODBThreads(self):
def test7ZODBThreads(self):
db = ZODB.DB(self._storage)
self._checkNThreads(7, ZODBClientThread, db, self)
db.close()
def check2StorageThreads(self):
def test2StorageThreads(self):
self._checkNThreads(2, StorageClientThread, self._storage, self)
def check7StorageThreads(self):
def test7StorageThreads(self):
self._checkNThreads(7, StorageClientThread, self._storage, self)
def check4ExtStorageThread(self):
def test4ExtStorageThread(self):
self._checkNThreads(4, ExtStorageClientThread, self._storage, self)
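six.reraise(*exc_info) reduces to a plain raise of the saved exception on Python 3, where the traceback travels on the exception object itself as __traceback__. A sketch of the join-time re-raise, with the raise wrapped so the example runs to completion:

    import sys

    def work():
        raise ValueError('boom')

    try:
        work()
    except ValueError:
        exc_info = sys.exc_info()

    try:
        raise exc_info[1]   # was: six.reraise(*exc_info)
    except ValueError as e:
        print(e.__traceback__ is not None)   # True: traceback preserved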
......@@ -12,40 +12,24 @@
#
##############################################################################
"""A minimal persistent object to use for tests"""
import functools
from persistent import Persistent
@functools.total_ordering
class MinPO(Persistent):
def __init__(self, value=None):
self.value = value
def __cmp__(self, aMinPO):
return cmp(self.value, aMinPO.value) # noqa: F821 undefined name 'cmp'
def __hash__(self):
return hash(self.value)
# Py3: Python 3 does not support cmp() anymore. This is insane!!
def __eq__(self, aMinPO):
return self.value == aMinPO.value
def __lt__(self, aMinPO):
return self.value < aMinPO.value
# @functools.total_ordering is not available in 2.6 :-(
def __ne__(self, aMinPO):
return self.value != aMinPO.value
def __gt__(self, aMinPO):
return self.value > aMinPO.value
def __le__(self, aMinPO):
return self.value <= aMinPO.value
def __ge__(self, aMinPO):
return self.value >= aMinPO.value
def __repr__(self):
return "MinPO(%s)" % self.value
......@@ -12,10 +12,10 @@
#
##############################################################################
"""Run some tests relevant for storages that support pack()."""
from __future__ import print_function
import doctest
import time
from io import BytesIO
import transaction
from persistent import Persistent
......@@ -24,7 +24,6 @@ from persistent.mapping import PersistentMapping
import ZODB.interfaces
import ZODB.tests.util
from ZODB import DB
from ZODB._compat import BytesIO
from ZODB._compat import PersistentPickler
from ZODB._compat import Pickler
from ZODB._compat import Unpickler
......@@ -50,7 +49,7 @@ ZERO = b'\0'*8
# ids, not as the object's state. This makes the referencesf stuff work,
# because it pickle sniffs for persistent ids (so we have to get those
# persistent ids into the root object's pickle).
class Root(object):
class Root:
pass
......@@ -58,7 +57,7 @@ class Root(object):
# persistent pickling machinery -- in the dumps() function below -- will
# pickle the oid string instead of the object's actual state. Yee haw, this
# stuff is deep. ;)
class Object(object):
class Object:
def __init__(self, oid):
self._oid = oid
......@@ -69,7 +68,6 @@ class Object(object):
self.__dict__.clear()
self.__dict__.update(state)
if not isinstance(self._oid, bytes):
# Python 3
self._oid = self._oid.encode('ascii')
......@@ -108,7 +106,7 @@ def pdumps(obj):
return s.getvalue()
class PackableStorageBase(object):
class PackableStorageBase:
# We keep a cache of object ids to instances so that the unpickler can
# easily return any persistent object.
......@@ -159,7 +157,7 @@ class PackableStorageBase(object):
p.dump((PersistentMapping, None))
p.dump({'_container': {}})
t = TransactionMetaData()
t.description = u'initial database creation'
t.description = 'initial database creation'
self._storage.tpc_begin(t)
self._storage.store(ZERO, None, file.getvalue(), '', t)
self._storage.tpc_vote(t)
......@@ -177,14 +175,14 @@ class PackableStorageBase(object):
class PackableStorage(PackableStorageBase):
def checkPackEmptyStorage(self):
def testPackEmptyStorage(self):
self._storage.pack(time.time(), referencesf)
def checkPackTomorrow(self):
def testPackTomorrow(self):
self._initroot()
self._storage.pack(time.time() + 10000, referencesf)
def checkPackYesterday(self):
def testPackYesterday(self):
self._initroot()
self._storage.pack(time.time() - 10000, referencesf)
......@@ -283,15 +281,15 @@ class PackableStorage(PackableStorageBase):
db.close()
@time_monotonically_increases
def checkPackWhileWriting(self):
def testPackWhileWriting(self):
self._PackWhileWriting(pack_now=False)
@time_monotonically_increases
def checkPackNowWhileWriting(self):
def testPackNowWhileWriting(self):
self._PackWhileWriting(pack_now=True)
@time_monotonically_increases
def checkPackLotsWhileWriting(self):
def testPackLotsWhileWriting(self):
# This is like the other pack-while-writing tests, except it packs
# repeatedly until the client thread is done. At the time it was
# introduced, it reliably provoked
......@@ -329,7 +327,7 @@ class PackableStorage(PackableStorageBase):
db.close()
def checkPackWithMultiDatabaseReferences(self):
def testPackWithMultiDatabaseReferences(self):
databases = {}
db = DB(self._storage, databases=databases, database_name='')
otherdb = ZODB.tests.util.DB(databases=databases, database_name='o')
......@@ -348,7 +346,7 @@ class PackableStorage(PackableStorageBase):
otherdb.close()
db.close()
def checkPackAllRevisions(self):
def testPackAllRevisions(self):
self._initroot()
eq = self.assertEqual
raises = self.assertRaises
......@@ -387,7 +385,7 @@ class PackableStorage(PackableStorageBase):
raises(KeyError, self._storage.loadSerial, oid, revid2)
raises(KeyError, self._storage.loadSerial, oid, revid3)
def checkPackJustOldRevisions(self):
def testPackJustOldRevisions(self):
eq = self.assertEqual
raises = self.assertRaises
loads = self._makeloader()
......@@ -451,7 +449,7 @@ class PackableStorage(PackableStorageBase):
eq(pobj.getoid(), oid)
eq(pobj.value, 3)
def checkPackOnlyOneObject(self):
def testPackOnlyOneObject(self):
eq = self.assertEqual
raises = self.assertRaises
loads = self._makeloader()
......@@ -538,7 +536,7 @@ class PackableStorage(PackableStorageBase):
class PackableStorageWithOptionalGC(PackableStorage):
def checkPackAllRevisionsNoGC(self):
def testPackAllRevisionsNoGC(self):
self._initroot()
eq = self.assertEqual
raises = self.assertRaises
......@@ -579,14 +577,14 @@ class PackableStorageWithOptionalGC(PackableStorage):
class PackableUndoStorage(PackableStorageBase):
def checkPackUnlinkedFromRoot(self):
def testPackUnlinkedFromRoot(self):
eq = self.assertEqual
db = DB(self._storage)
conn = db.open()
root = conn.root()
txn = transaction.get()
txn.note(u'root')
txn.note('root')
txn.commit()
now = packtime = time.time()
......@@ -598,12 +596,12 @@ class PackableUndoStorage(PackableStorageBase):
root['obj'] = obj
txn = transaction.get()
txn.note(u'root -> o1')
txn.note('root -> o1')
txn.commit()
del root['obj']
txn = transaction.get()
txn.note(u'root -x-> o1')
txn.note('root -x-> o1')
txn.commit()
self._storage.pack(packtime, referencesf)
......@@ -612,7 +610,7 @@ class PackableUndoStorage(PackableStorageBase):
tid = log[0]['id']
db.undo(tid)
txn = transaction.get()
txn.note(u'undo root -x-> o1')
txn.note('undo root -x-> o1')
txn.commit()
conn.sync()
......@@ -620,7 +618,7 @@ class PackableUndoStorage(PackableStorageBase):
eq(root['obj'].value, 7)
@time_monotonically_increases
def checkRedundantPack(self):
def testRedundantPack(self):
# It is an error to perform a pack with a packtime earlier
# than a previous packtime. The storage can't do a full
# traversal as of the packtime, because the previous pack may
......@@ -665,7 +663,7 @@ class PackableUndoStorage(PackableStorageBase):
load_current(self._storage, lost_oid)
@time_monotonically_increases(0.1)
def checkPackUndoLog(self):
def testPackUndoLog(self):
self._initroot()
# Create a `persistent' object
obj = self._newobj()
......@@ -784,7 +782,7 @@ class ClientThread(TestThread):
conn.close()
class ElapsedTimer(object):
class ElapsedTimer:
def __init__(self, start_time):
self.start_time = start_time
......
......@@ -16,9 +16,9 @@
from ZODB.utils import load_current
class PersistentStorage(object):
class PersistentStorage:
def checkUpdatesPersist(self):
def testUpdatesPersist(self):
oids = []
def new_oid_wrapper(l=oids, new_oid=self._storage.new_oid): # noqa: E741 E501 ambiguous variable name 'l' and line too long
......
......@@ -17,7 +17,7 @@ from ZODB.POSException import Unsupported
from ZODB.utils import load_current
class ReadOnlyStorage(object):
class ReadOnlyStorage:
def _create_data(self):
# test a read-only storage that already has some data
......@@ -32,7 +32,7 @@ class ReadOnlyStorage(object):
self.open(read_only=True)
self.assertTrue(self._storage.isReadOnly())
def checkReadMethods(self):
def testReadMethods(self):
self._create_data()
self._make_readonly()
# Note that this doesn't check _all_ read methods.
......@@ -46,7 +46,7 @@ class ReadOnlyStorage(object):
except Unsupported:
pass
def checkWriteMethods(self):
def testWriteMethods(self):
self._make_readonly()
self.assertRaises(ReadOnlyError, self._storage.new_oid)
t = TransactionMetaData()
......
......@@ -30,7 +30,7 @@ from ZODB.utils import load_current
class RecoveryStorage(IteratorDeepCompare):
# Requires a setUp() that creates a self._dst destination storage
def checkSimpleRecovery(self):
def testSimpleRecovery(self):
oid = self._storage.new_oid()
revid = self._dostore(oid, data=11)
revid = self._dostore(oid, revid=revid, data=12)
......@@ -38,7 +38,7 @@ class RecoveryStorage(IteratorDeepCompare):
self._dst.copyTransactionsFrom(self._storage)
self.compare(self._storage, self._dst)
def checkRestoreAcrossPack(self):
def testRestoreAcrossPack(self):
db = DB(self._storage)
c = db.open()
r = c.root()
......@@ -69,22 +69,22 @@ class RecoveryStorage(IteratorDeepCompare):
self._dst.tpc_finish(final)
@time_monotonically_increases
def checkPackWithGCOnDestinationAfterRestore(self):
def testPackWithGCOnDestinationAfterRestore(self):
raises = self.assertRaises
db = DB(self._storage)
conn = db.open()
root = conn.root()
root.obj = obj1 = MinPO(1)
txn = transaction.get()
txn.note(u'root -> obj')
txn.note('root -> obj')
txn.commit()
root.obj.obj = obj2 = MinPO(2)
txn = transaction.get()
txn.note(u'root -> obj -> obj')
txn.note('root -> obj -> obj')
txn.commit()
del root.obj
txn = transaction.get()
txn.note(u'root -X->')
txn.note('root -X->')
txn.commit()
# Now copy the transactions to the destination
self._dst.copyTransactionsFrom(self._storage)
......@@ -97,7 +97,7 @@ class RecoveryStorage(IteratorDeepCompare):
raises(KeyError, load_current, self._dst, obj1._p_oid)
raises(KeyError, load_current, self._dst, obj2._p_oid)
def checkRestoreWithMultipleObjectsInUndoRedo(self):
def testRestoreWithMultipleObjectsInUndoRedo(self):
from ZODB.FileStorage import FileStorage
# Undo creates backpointers in (at least) FileStorage. ZODB 3.2.1
......
......@@ -25,9 +25,9 @@ from ZODB.utils import p64
from ZODB.utils import u64
class RevisionStorage(object):
class RevisionStorage:
def checkLoadSerial(self):
def testLoadSerial(self):
oid = self._storage.new_oid()
revid = ZERO
revisions = {}
......@@ -40,7 +40,7 @@ class RevisionStorage(object):
self.assertEqual(zodb_unpickle(data), value)
@time_monotonically_increases
def checkLoadBefore(self):
def testLoadBefore(self):
# Store 10 revisions of one object and then make sure that we
# can get all the non-current revisions back.
oid = self._storage.new_oid()
......@@ -71,7 +71,7 @@ class RevisionStorage(object):
self.assertEqual(revs[i-1][0], data)
self.assertEqual(tid, end)
def checkLoadBeforeEdges(self):
def testLoadBeforeEdges(self):
# Check the edges cases for a non-current load.
oid = self._storage.new_oid()
......@@ -96,7 +96,7 @@ class RevisionStorage(object):
self.assertEqual(end, revid2)
@time_monotonically_increases
def checkLoadBeforeOld(self):
def testLoadBeforeOld(self):
# Look for a very old revision. With the BaseStorage implementation
# this should require multiple history() calls.
oid = self._storage.new_oid()
......@@ -114,7 +114,7 @@ class RevisionStorage(object):
# Unsure: Is it okay to assume everyone testing against RevisionStorage
# implements undo?
def checkLoadBeforeUndo(self):
def testLoadBeforeUndo(self):
# Do several transactions then undo them.
oid = self._storage.new_oid()
revid = None
......@@ -142,7 +142,7 @@ class RevisionStorage(object):
else:
self.assertEqual(None, t[2])
def checkLoadBeforeConsecutiveTids(self):
def testLoadBeforeConsecutiveTids(self):
eq = self.assertEqual
oid = self._storage.new_oid()
......@@ -167,7 +167,7 @@ class RevisionStorage(object):
eq(u64(start_tid), 1)
eq(u64(end_tid), 2)
def checkLoadBeforeCreation(self):
def testLoadBeforeCreation(self):
eq = self.assertEqual
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
......
......@@ -18,13 +18,12 @@ semantics (which you can override), and it also provides a helper
method _dostore() which performs a complete store transaction for a
single object revision.
"""
from __future__ import print_function
import sys
import time
from io import BytesIO
import ZODB.tests.util
from ZODB._compat import BytesIO
from ZODB._compat import PersistentPickler
from ZODB._compat import Unpickler
from ZODB._compat import _protocol
......@@ -76,7 +75,7 @@ def zodb_pickle(obj):
def persistent_load(pid):
# helper for zodb_unpickle
return "ref to %s.%s oid=%s" % (pid[1][0], pid[1][1], u64(pid[0]))
return "ref to {}.{} oid={}".format(pid[1][0], pid[1][1], u64(pid[0]))
def zodb_unpickle(data):
......@@ -103,7 +102,7 @@ def zodb_unpickle(data):
try:
klass = ns[klassname]
except KeyError:
print("can't find %s in %r" % (klassname, ns), file=sys.stderr)
print(f"can't find {klassname} in {ns!r}", file=sys.stderr)
inst = klass()
else:
raise ValueError("expected class info: %s" % repr(klass_info))
......@@ -183,7 +182,7 @@ class StorageTestBase(ZODB.tests.util.TestCase):
# Undo a tid that affects a single object (oid).
# This is very specialized.
t = TransactionMetaData()
t.note(note or u"undo")
t.note(note or "undo")
self._storage.tpc_begin(t)
undo_result = self._storage.undo(tid, t)
vote_result = self._storage.tpc_vote(t)
......
......@@ -71,7 +71,7 @@ SERIALNO = "\000" * 8
TID = "\000" * 8
class SynchronizedStorage(object):
class SynchronizedStorage:
def verifyNotCommitting(self, callable, *args):
self.assertRaises(StorageTransactionError, callable, *args)
......@@ -82,37 +82,37 @@ class SynchronizedStorage(object):
self.assertRaises(StorageTransactionError, callable, *args)
self._storage.tpc_abort(t)
def checkStoreNotCommitting(self):
def testStoreNotCommitting(self):
self.verifyNotCommitting(self._storage.store,
OID, SERIALNO, b"", "", TransactionMetaData())
def checkStoreWrongTrans(self):
def testStoreWrongTrans(self):
self.verifyWrongTrans(self._storage.store,
OID, SERIALNO, b"", "", TransactionMetaData())
def checkAbortNotCommitting(self):
def testAbortNotCommitting(self):
self._storage.tpc_abort(TransactionMetaData())
def checkAbortWrongTrans(self):
def testAbortWrongTrans(self):
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.tpc_abort(TransactionMetaData())
self._storage.tpc_abort(t)
def checkFinishNotCommitting(self):
def testFinishNotCommitting(self):
t = TransactionMetaData()
self.assertRaises(StorageTransactionError,
self._storage.tpc_finish, t)
self._storage.tpc_abort(t)
def checkFinishWrongTrans(self):
def testFinishWrongTrans(self):
t = TransactionMetaData()
self._storage.tpc_begin(t)
self.assertRaises(StorageTransactionError,
self._storage.tpc_finish, TransactionMetaData())
self._storage.tpc_abort(t)
def checkBeginCommitting(self):
def testBeginCommitting(self):
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.tpc_abort(t)
......
......@@ -17,8 +17,6 @@ Any storage that supports undo() must pass these tests.
"""
import time
from six import PY3
import transaction
from persistent import Persistent
......@@ -56,7 +54,7 @@ def listeq(L1, L2):
return sorted(L1) == sorted(L2)
class TransactionalUndoStorage(object):
class TransactionalUndoStorage:
def _multi_obj_transaction(self, objs):
t = TransactionMetaData()
......@@ -95,7 +93,7 @@ class TransactionalUndoStorage(object):
self._storage.tpc_finish(t)
return oids
def checkSimpleTransactionalUndo(self):
def testSimpleTransactionalUndo(self):
eq = self.assertEqual
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(23))
......@@ -127,7 +125,7 @@ class TransactionalUndoStorage(object):
eq(zodb_unpickle(data), MinPO(23))
self._iterate()
def checkCreationUndoneGetTid(self):
def testCreationUndoneGetTid(self):
# create an object
oid = self._storage.new_oid()
self._dostore(oid, data=MinPO(23))
......@@ -139,7 +137,7 @@ class TransactionalUndoStorage(object):
# The current version of FileStorage fails this test
self.assertRaises(KeyError, self._storage.getTid, oid)
def checkUndoCreationBranch1(self):
def testUndoCreationBranch1(self):
eq = self.assertEqual
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(11))
......@@ -161,7 +159,7 @@ class TransactionalUndoStorage(object):
b'\x7f\xff\xff\xff\xff\xff\xff\xff')
self._iterate()
def checkUndoCreationBranch2(self):
def testUndoCreationBranch2(self):
eq = self.assertEqual
oid = self._storage.new_oid()
revid = self._dostore(oid, data=MinPO(11))
......@@ -179,7 +177,7 @@ class TransactionalUndoStorage(object):
eq(zodb_unpickle(data), MinPO(12))
self._iterate()
def checkTwoObjectUndo(self):
def testTwoObjectUndo(self):
eq = self.assertEqual
# Convenience
p31, p32, p51, p52 = map(zodb_pickle,
......@@ -218,7 +216,7 @@ class TransactionalUndoStorage(object):
eq(zodb_unpickle(data), MinPO(51))
self._iterate()
def checkTwoObjectUndoAtOnce(self):
def testTwoObjectUndoAtOnce(self):
# Convenience
eq = self.assertEqual
p30, p31, p32, p50, p51, p52 = map(zodb_pickle,
......@@ -268,7 +266,7 @@ class TransactionalUndoStorage(object):
eq(zodb_unpickle(data), MinPO(52))
self._iterate()
def checkTwoObjectUndoAgain(self):
def testTwoObjectUndoAgain(self):
eq = self.assertEqual
p31, p32, p33, p51, p52, p53 = map(
zodb_pickle,
......@@ -315,7 +313,7 @@ class TransactionalUndoStorage(object):
eq(zodb_unpickle(data), MinPO(54))
self._iterate()
def checkNotUndoable(self):
def testNotUndoable(self):
eq = self.assertEqual
# Set things up so we've got a transaction that can't be undone
oid = self._storage.new_oid()
......@@ -363,7 +361,7 @@ class TransactionalUndoStorage(object):
self._storage.tpc_abort(t)
self._iterate()
def checkTransactionalUndoAfterPack(self):
def testTransactionalUndoAfterPack(self):
# bwarsaw Date: Thu Mar 28 21:04:43 2002 UTC
# This is a test which should provoke the underlying bug in
# transactionalUndo() on a standby storage. If our hypothesis
......@@ -405,7 +403,7 @@ class TransactionalUndoStorage(object):
self.assertEqual(zodb_unpickle(data), MinPO(52))
self._iterate()
def checkTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
def testTransactionalUndoAfterPackWithObjectUnlinkFromRoot(self):
eq = self.assertEqual
db = DB(self._storage)
conn = db.open()
......@@ -417,7 +415,7 @@ class TransactionalUndoStorage(object):
root['obj'] = o1
o1.obj = o2
txn = transaction.get()
txn.note(u'o1 -> o2')
txn.note('o1 -> o2')
txn.commit()
now = packtime = time.time()
while packtime <= now:
......@@ -426,12 +424,12 @@ class TransactionalUndoStorage(object):
o3 = C()
o2.obj = o3
txn = transaction.get()
txn.note(u'o1 -> o2 -> o3')
txn.note('o1 -> o2 -> o3')
txn.commit()
o1.obj = o3
txn = transaction.get()
txn.note(u'o1 -> o3')
txn.note('o1 -> o3')
txn.commit()
log = self._storage.undoLog()
......@@ -449,7 +447,7 @@ class TransactionalUndoStorage(object):
tid = log[0]['id']
db.undo(tid)
txn = transaction.get()
txn.note(u'undo')
txn.note('undo')
txn.commit()
# undo does a txn-undo, but doesn't invalidate
conn.sync()
......@@ -465,7 +463,7 @@ class TransactionalUndoStorage(object):
conn.close()
db.close()
def checkPackAfterUndoDeletion(self):
def testPackAfterUndoDeletion(self):
db = DB(self._storage)
cn = db.open()
try:
......@@ -481,14 +479,14 @@ class TransactionalUndoStorage(object):
root["key1"] = MinPO(1)
root["key2"] = MinPO(2)
txn = transaction.get()
txn.note(u"create 3 keys")
txn.note("create 3 keys")
txn.commit()
set_pack_time()
del root["key1"]
txn = transaction.get()
txn.note(u"delete 1 key")
txn.note("delete 1 key")
txn.commit()
set_pack_time()
......@@ -500,7 +498,7 @@ class TransactionalUndoStorage(object):
L = db.undoInfo()
db.undo(L[0]["id"])
txn = transaction.get()
txn.note(u"undo deletion")
txn.note("undo deletion")
txn.commit()
set_pack_time()
......@@ -524,7 +522,7 @@ class TransactionalUndoStorage(object):
cn.close()
db.close()
def checkPackAfterUndoManyTimes(self):
def testPackAfterUndoManyTimes(self):
db = DB(self._storage)
cn = db.open()
try:
......@@ -536,7 +534,7 @@ class TransactionalUndoStorage(object):
transaction.commit()
rt["test"] = MinPO(3)
txn = transaction.get()
txn.note(u"root of undo")
txn.note("root of undo")
txn.commit()
packtimes = []
......@@ -544,7 +542,7 @@ class TransactionalUndoStorage(object):
L = db.undoInfo()
db.undo(L[0]["id"])
txn = transaction.get()
txn.note(u"undo %d" % i)
txn.note("undo %d" % i)
txn.commit()
rt._p_deactivate()
cn.sync()
......@@ -576,7 +574,7 @@ class TransactionalUndoStorage(object):
# most other storages don't.
pass
def checkTransactionalUndoIterator(self):
def testTransactionalUndoIterator(self):
# check that data_txn set in iterator makes sense
if not hasattr(self._storage, "iterator"):
return
......@@ -661,12 +659,12 @@ class TransactionalUndoStorage(object):
self.assertRaises(StopIteration, next, transactions)
def checkUndoLogMetadata(self):
def testUndoLogMetadata(self):
# test that the metadata is correct in the undo log
t = transaction.get()
t.note(u't1')
t.note('t1')
t.setExtendedInfo('k2', 'this is transaction metadata')
t.setUser(u'u3', path=u'p3')
t.setUser('u3', path='p3')
db = DB(self._storage)
conn = db.open()
try:
......@@ -739,13 +737,13 @@ class TransactionalUndoStorage(object):
# before a ZRS secondary even starts, and then the latter can't
# find a server to recover from).
def checkIndicesInUndoInfo(self):
def testIndicesInUndoInfo(self):
self._exercise_info_indices("undoInfo")
def checkIndicesInUndoLog(self):
def testIndicesInUndoLog(self):
self._exercise_info_indices("undoLog")
def checkUndoMultipleConflictResolution(self, reverse=False):
def testUndoMultipleConflictResolution(self, reverse=False):
from .ConflictResolution import PCounter
db = DB(self._storage)
cn = db.open()
......@@ -755,8 +753,7 @@ class TransactionalUndoStorage(object):
for i in range(4):
with db.transaction() as conn:
conn.transaction_manager.get().note(
(str if PY3 else unicode)(i)) # noqa: F821 undef name
conn.transaction_manager.get().note(str(i))
conn.root.x.inc()
ids = [log['id'] for log in db.undoLog(1, 3)]
......@@ -771,5 +768,5 @@ class TransactionalUndoStorage(object):
cn.close()
db.close()
def checkUndoMultipleConflictResolutionReversed(self):
self.checkUndoMultipleConflictResolution(True)
def testUndoMultipleConflictResolutionReversed(self):
self.testUndoMultipleConflictResolution(True)
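The undo tests above all drive the same public API: find an undoable transaction id in the undo log, ask the DB to undo it, and commit the undo as a new transaction. A minimal sketch, assuming a FileStorage (which supports undo) and an illustrative file name:

import transaction
from ZODB import DB
from ZODB.FileStorage import FileStorage

db = DB(FileStorage('data.fs'))  # 'data.fs' is an illustrative path
conn = db.open()
conn.root()['x'] = 1
transaction.get().note('set x')
transaction.commit()

tid = db.undoLog(0, 1)[0]['id']  # id of the most recent transaction
db.undo(tid)                     # schedules the undo...
transaction.commit()             # ...which is itself a new transaction
db.close()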
......@@ -29,7 +29,7 @@ A blob implements the IBlob interface::
We can open a new blob file for reading, but it won't have any data::
>>> with myblob.open("r") as fp: fp.read()
''
b''
But we can write data to a new Blob by opening it for writing::
......@@ -41,7 +41,7 @@ If we try to open a Blob again while it is open for writing, we get an error::
>>> myblob.open("r")
Traceback (most recent call last):
...
BlobError: Already opened for writing.
ZODB.interfaces.BlobError: Already opened for writing.
We can close the file::
......@@ -54,20 +54,20 @@ Now we can open it for reading::
And we get the data back::
>>> f2.read()
'Hi, Blob!'
b'Hi, Blob!'
If we want to, we can open it again::
>>> f3 = myblob.open("r")
>>> f3.read()
'Hi, Blob!'
b'Hi, Blob!'
But we can't open it for writing, while it is opened for reading::
>>> myblob.open("a")
Traceback (most recent call last):
...
BlobError: Already opened for reading.
ZODB.interfaces.BlobError: Already opened for reading.
Before we can write, we have to close the readers::
......@@ -84,12 +84,12 @@ We can't open a blob while it is open for writing:
>>> myblob.open("w")
Traceback (most recent call last):
...
BlobError: Already opened for writing.
ZODB.interfaces.BlobError: Already opened for writing.
>>> myblob.open("r")
Traceback (most recent call last):
...
BlobError: Already opened for writing.
ZODB.interfaces.BlobError: Already opened for writing.
>>> f4.close()
......@@ -97,7 +97,7 @@ Now we can read it::
>>> f4a = myblob.open("r")
>>> f4a.read()
'Hi, Blob!\nBlob is fine.'
b'Hi, Blob!\nBlob is fine.'
>>> f4a.close()
You shouldn't need to explicitly close a blob unless you hold a reference
......@@ -106,7 +106,7 @@ around via a name, the second call to open it in a writable mode would fail
with a BlobError, but it doesn't::
>>> with myblob.open("r+") as fp: fp.read()
'Hi, Blob!\nBlob is fine.'
b'Hi, Blob!\nBlob is fine.'
>>> f4b = myblob.open("a")
>>> f4b.close()
......@@ -114,9 +114,9 @@ We can read lines out of the blob too::
>>> f5 = myblob.open("r")
>>> f5.readline()
'Hi, Blob!\n'
b'Hi, Blob!\n'
>>> f5.readline()
'Blob is fine.'
b'Blob is fine.'
>>> f5.close()
We can seek to certain positions in a blob and read portions of it::
......@@ -126,7 +126,7 @@ We can seek to certain positions in a blob and read portions of it::
>>> int(f6.tell())
4
>>> f6.read(5)
'Blob!'
b'Blob!'
>>> f6.close()
We can use the object returned by a blob open call as an iterable::
......@@ -146,7 +146,7 @@ We can truncate a blob::
>>> f8.close()
>>> f8 = myblob.open('r')
>>> f8.read()
''
b''
>>> f8.close()
Blobs are always opened in binary mode::
......@@ -159,13 +159,12 @@ Blobs are always opened in binary mode::
Blobs that have not been committed can be opened using any mode,
except for "c"::
>>> import six
>>> from ZODB.blob import BlobError, valid_modes
>>> for mode in valid_modes:
... try:
... f10 = Blob().open(mode)
... except BlobError:
... six.print_('open failed with mode "%s"' % mode)
... print('open failed with mode "%s"' % mode)
... else:
... f10.close()
open failed with mode "c"
......@@ -195,4 +194,4 @@ constructor. (This is a convenience, mostly for writing tests.)
>>> myblob = Blob(b'some data')
>>> with myblob.open() as fp: fp.read()
'some data'
b'some data'
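All of the output changes above follow from the same fact: blob files are always opened in binary mode, so on Python 3 reads return bytes and writes require bytes. A minimal sketch:

from ZODB.blob import Blob

b = Blob(b'Hi, Blob!')               # data may be passed to the constructor
with b.open('r') as f:
    assert f.read() == b'Hi, Blob!'  # bytes out, never str
with b.open('a') as f:
    _ = f.write(b' More.')           # bytes in as well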
......@@ -44,7 +44,7 @@ Getting stuff out of there works similarly:
>>> IBlob.providedBy(blob2)
True
>>> with blob2.open("r") as fp: fp.read()
"I'm a happy Blob."
b"I'm a happy Blob."
>>> transaction2.abort()
MVCC also works.
......@@ -56,7 +56,7 @@ MVCC also works.
>>> f.close()
>>> transaction.commit()
>>> with connection3.root()['myblob'].open('r') as fp: fp.read()
"I'm a happy Blob."
b"I'm a happy Blob."
>>> transaction2.abort()
>>> transaction3.abort()
......@@ -74,7 +74,7 @@ You can't put blobs into a database that uses a Non-Blob-Storage, though:
>>> transaction2.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
Unsupported: Storing Blobs in ...
ZODB.POSException.Unsupported: Storing Blobs in ...
>>> transaction2.abort()
>>> connection2.close()
......
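The Unsupported error above is a storage capability issue: only blob-enabled storages accept blobs. A minimal sketch of a blob-capable setup, with illustrative paths:

from ZODB import DB
from ZODB.FileStorage import FileStorage

# FileStorage supports blobs once it is given a blob directory.
db = DB(FileStorage('data.fs', blob_dir='blobs'))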
......@@ -29,7 +29,7 @@ We now can call open on the blob and read and write the data::
>>> blob_read = blob.open('r')
>>> blob_read.read()
"I'm a Blob and I feel fine."
b"I'm a Blob and I feel fine."
>>> blob_read.close()
>>> blob_write = blob.open('w')
>>> _ = blob_write.write(b'I was changed.')
......@@ -43,12 +43,12 @@ already::
>>> blob_read = blob.open('r')
>>> blob.consumeFile('to_import')
Traceback (most recent call last):
BlobError: Already opened for reading.
ZODB.interfaces.BlobError: Already opened for reading.
>>> blob_read.close()
>>> blob_write = blob.open('w')
>>> blob.consumeFile('to_import')
Traceback (most recent call last):
BlobError: Already opened for writing.
ZODB.interfaces.BlobError: Already opened for writing.
>>> blob_write.close()
Now, after closing all readers and writers we can consume files again::
......@@ -56,7 +56,7 @@ Now, after closing all readers and writers we can consume files again::
>>> blob.consumeFile('to_import')
>>> blob_read = blob.open('r')
>>> blob_read.read()
'I am another blob.'
b'I am another blob.'
>>> blob_read.close()
......@@ -85,7 +85,7 @@ back to try a copy/remove operation that is successful::
The blob did not have data before, so it should have the consumed data now::
>>> with blob.open('r') as fp: fp.read()
'Some data.'
b'Some data.'
Case 2: We don't have uncommitted data and both the link operation and the
copy fail. The exception will be re-raised and the target file will not
......@@ -108,7 +108,7 @@ exist::
The blob did not have data before, so it shouldn't have data now::
>>> with blob.open('r') as fp: fp.read()
''
b''
Case 3: We have uncommitted data, but the link and the copy operations fail.
The exception will be re-raised and the target file will exist with the
......@@ -126,7 +126,7 @@ The blob did exist before and had uncommitted data; this shouldn't have
changed::
>>> with blob.open('r') as fp: fp.read()
'Uncommitted data'
b'Uncommitted data'
>>> os.rename = os_rename
>>> ZODB.utils.cp = utils_cp
......@@ -32,10 +32,10 @@ entries per directory level:
>>> import os
>>> bushy.path_to_oid(os.path.join(
... '0x01', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'))
'\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x01\x00\x00\x00\x00\x00\x00\x00'
>>> bushy.path_to_oid(os.path.join(
... '0xff', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00', '0x00'))
'\xff\x00\x00\x00\x00\x00\x00\x00'
b'\xff\x00\x00\x00\x00\x00\x00\x00'
Paths that do not represent an OID will cause a ValueError:
......@@ -60,7 +60,7 @@ of blobs at the same time (e.g. 32k on ext3).
'0x01'
>>> lawn.path_to_oid('0x01')
'\x00\x00\x00\x00\x00\x00\x00\x01'
b'\x00\x00\x00\x00\x00\x00\x00\x01'
Paths that do not represent an OID will cause a ValueError:
......@@ -142,7 +142,7 @@ it will leave a marker with the chosen layout if no marker exists yet:
'bushy'
>>> fsh.create()
>>> with open(os.path.join(blobs, LAYOUT_MARKER), 'rb') as fp: fp.read()
'bushy'
b'bushy'
If the FSH finds a marker, then it verifies whether its content matches the
strategy that was chosen. It will raise an exception if we try to work with a
......@@ -172,7 +172,7 @@ the marker will be used in the future:
>>> blob_storage.fshelper.layout_name
'lawn'
>>> with open(os.path.join(blobs, LAYOUT_MARKER), 'rb') as fp: fp.read()
'lawn'
b'lawn'
>>> blob_storage = BlobStorage('blobs', base_storage, layout='bushy')
... # doctest: +ELLIPSIS
Traceback (most recent call last):
......@@ -248,14 +248,13 @@ with the same sizes and permissions:
>>> len(lawn_files) == len(bushy_files)
True
>>> import six
>>> for file_name, lawn_path in sorted(lawn_files.items()):
... if file_name == '.layout':
... continue
... lawn_stat = os.stat(lawn_path)
... bushy_path = bushy_files[file_name]
... bushy_stat = os.stat(bushy_path)
... six.print_(lawn_path, '-->', bushy_path)
... print(lawn_path, '-->', bushy_path)
... if ((lawn_stat.st_mode, lawn_stat.st_size) !=
... (bushy_stat.st_mode, bushy_stat.st_size)):
... print('oops')
......@@ -289,7 +288,7 @@ True
... lawn_stat = os.stat(lawn_path)
... bushy_path = bushy_files[file_name]
... bushy_stat = os.stat(bushy_path)
... six.print_(bushy_path, '-->', lawn_path)
... print(bushy_path, '-->', lawn_path)
... if ((lawn_stat.st_mode, lawn_stat.st_size) !=
... (bushy_stat.st_mode, bushy_stat.st_size)):
... print('oops')
......
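The doctests in this file verify the oid/path round trip for both layouts. A sketch of the same round trip through the bushy layout, assuming the BushyLayout class from ZODB.blob:

from ZODB.blob import BushyLayout

layout = BushyLayout()
oid = b'\x01' + b'\x00' * 7
path = layout.oid_to_path(oid)  # one directory level per oid byte,
                                # e.g. '0x01/0x00/.../0x00'
assert layout.path_to_oid(path) == oid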
......@@ -29,7 +29,7 @@ Aborting a blob add leaves the blob unchanged:
>>> blob1._p_jar
>>> with blob1.open() as fp:
... fp.read()
'this is blob 1'
b'this is blob 1'
It doesn't clear the file because there is no previously committed version:
......@@ -54,7 +54,7 @@ state:
... _ = file.write(b'this is new blob 1')
>>> with blob1.open() as fp:
... fp.read()
'this is new blob 1'
b'this is new blob 1'
>>> fname = blob1._p_blob_uncommitted
>>> os.path.exists(fname)
True
......@@ -66,7 +66,7 @@ state:
>>> with blob1.open() as fp:
... fp.read()
'this is blob 1'
b'this is blob 1'
Opening a blob gives us a filehandle. Getting data out of the
resulting filehandle is accomplished via the filehandle's read method::
......@@ -77,7 +77,7 @@ resulting filehandle is accomplished via the filehandle's read method::
>>> blob1afh1 = blob1a.open("r")
>>> blob1afh1.read()
'this is blob 1'
b'this is blob 1'
Let's make another filehandle for read only to blob1a. Each file
handle has a reference to the (same) underlying blob::
......@@ -95,7 +95,7 @@ opened are still open::
>>> transaction.abort()
>>> blob1afh2.read()
'this is blob 1'
b'this is blob 1'
>>> blob1afh2.close()
......@@ -108,7 +108,7 @@ when we start)::
False
>>> with blob1a.open('r') as fp:
... fp.read()
'this is blob 1'
b'this is blob 1'
>>> with blob1a.open('a') as blob1afh3:
... assert(bool(blob1a._p_changed))
... _ = blob1afh3.write(b'woot!')
......@@ -129,13 +129,13 @@ blob2 (a different object) should be evident::
>>> with blob1.open('r') as fp:
... fp.read()
'this is blob 1woot!'
b'this is blob 1woot!'
>>> with blob1a.open('r') as fp:
... fp.read()
'this is blob 1woot!'
b'this is blob 1woot!'
>>> with blob2.open('r') as fp:
... fp.read()
'this is blob 3'
b'this is blob 3'
We shouldn't be able to persist a blob filehandle at commit time
(although the exception which is raised when an object cannot be
......@@ -169,22 +169,22 @@ connections should result in a write conflict error::
>>> tm1.commit()
>>> with root3['blob1'].open('r') as fp:
... fp.read()
'this is blob 1woot!this is from connection 3'
b'this is blob 1woot!this is from connection 3'
>>> tm2.commit()
Traceback (most recent call last):
...
ConflictError: database conflict error (oid 0x01, class ZODB.blob.Blob...)
ZODB.POSException.ConflictError: database conflict error (oid 0x01, class ZODB.blob.Blob...)
After the conflict, the winning transaction's result is visible on both
connections::
>>> with root3['blob1'].open('r') as fp:
... fp.read()
'this is blob 1woot!this is from connection 3'
b'this is blob 1woot!this is from connection 3'
>>> tm2.abort()
>>> with root4['blob1'].open('r') as fp:
... fp.read()
'this is blob 1woot!this is from connection 3'
b'this is blob 1woot!this is from connection 3'
You can't commit a transaction while blob files are open:
......@@ -221,17 +221,17 @@ We do support optimistic savepoints:
>>> transaction.commit()
>>> with root5['blob'].open("r") as fp:
... fp.read()
"I'm a happy blob."
b"I'm a happy blob."
>>> with root5['blob'].open("a") as blob_fh:
... _ = blob_fh.write(b" And I'm singing.")
>>> with root5['blob'].open("r") as fp:
... fp.read()
"I'm a happy blob. And I'm singing."
b"I'm a happy blob. And I'm singing."
>>> savepoint = transaction.savepoint(optimistic=True)
>>> with root5['blob'].open("r") as fp:
... fp.read()
"I'm a happy blob. And I'm singing."
b"I'm a happy blob. And I'm singing."
Savepoints store the blobs in temporary directories in the temporary
directory of the blob storage:
......@@ -254,7 +254,7 @@ We support non-optimistic savepoints too:
... _ = file.write(b" And I'm dancing.")
>>> with root5['blob'].open("r") as fp:
... fp.read()
"I'm a happy blob. And I'm singing. And I'm dancing."
b"I'm a happy blob. And I'm singing. And I'm dancing."
>>> savepoint = transaction.savepoint()
Again, the savepoint creates a new savepoints directory:
......@@ -269,7 +269,7 @@ Again, the savepoint creates a new savepoints directory:
>>> with root5['blob'].open("r") as fp:
... fp.read()
"I'm a happy blob. And I'm singing. And I'm dancing."
b"I'm a happy blob. And I'm singing. And I'm dancing."
>>> transaction.abort()
The savepoint blob directory gets cleaned up on an abort:
......@@ -309,11 +309,12 @@ and doesn't prevent us from opening the blob for writing:
>>> with blob.open('w') as file:
... _ = file.write(b'x')
>>> with blob.open() as fp: fp.read()
'x'
>>> with blob.open() as fp:
... fp.read()
b'x'
>>> f.read()
"I'm a happy blob."
b"I'm a happy blob."
>>> f.close()
>>> transaction.abort()
......@@ -325,12 +326,12 @@ uncommitted changes:
>>> blob.committed()
Traceback (most recent call last):
...
BlobError: Uncommitted changes
ZODB.interfaces.BlobError: Uncommitted changes
>>> blob.open('c')
Traceback (most recent call last):
...
BlobError: Uncommitted changes
ZODB.interfaces.BlobError: Uncommitted changes
>>> with blob.open('w') as file:
... _ = file.write(b"I'm a happy blob.")
......@@ -338,23 +339,23 @@ uncommitted changes:
>>> blob.committed()
Traceback (most recent call last):
...
BlobError: Uncommitted changes
ZODB.interfaces.BlobError: Uncommitted changes
>>> blob.open('c')
Traceback (most recent call last):
...
BlobError: Uncommitted changes
ZODB.interfaces.BlobError: Uncommitted changes
>>> s = transaction.savepoint()
>>> blob.committed()
Traceback (most recent call last):
...
BlobError: Uncommitted changes
ZODB.interfaces.BlobError: Uncommitted changes
>>> blob.open('c')
Traceback (most recent call last):
...
BlobError: Uncommitted changes
ZODB.interfaces.BlobError: Uncommitted changes
>>> transaction.commit()
>>> with open(blob.committed()) as fp:
......@@ -365,8 +366,7 @@ You can't open a committed blob file for writing:
>>> try:
... open(blob.committed(), 'w') # doctest: +ELLIPSIS
... except:
... # Produces IOError in Py2 and PermissionError in Py3
... except PermissionError:
... print('Error raised.')
Error raised.
......@@ -409,7 +409,7 @@ Similarly, the new object wasn't added to the storage:
>>> blob_storage.load(new_oid, '')
Traceback (most recent call last):
...
POSKeyError: 0x...
ZODB.POSException.POSKeyError: 0x...
.. clean up
......
......@@ -140,16 +140,15 @@ is reset:
>>> blob_storage._blobs_pack_is_in_progress = True
>>> blob_storage.pack(packtime, referencesf)
Traceback (most recent call last):
BlobStorageError: Already packing
ZODB.blob.BlobStorageError: Already packing
>>> blob_storage._blobs_pack_is_in_progress = False
>>> blob_storage.pack(packtime, referencesf)
We can also see, that the flag is set during the pack, by leveraging the
knowledge that the underlying storage's pack method is also called:
>>> import six
>>> def dummy_pack(time, ref):
... six.print_(
... print(
... "_blobs_pack_is_in_progress =",
... blob_storage._blobs_pack_is_in_progress)
... return base_pack(time, ref)
......
......@@ -13,7 +13,6 @@
##############################################################################
"""Functional test to produce a dangling reference."""
from __future__ import print_function
import time
......@@ -32,19 +31,19 @@ def create_dangling_ref(db):
rt = db.open().root()
rt[1] = o1 = P()
transaction.get().note(u"create o1")
transaction.get().note("create o1")
transaction.commit()
rt[2] = o2 = P()
transaction.get().note(u"create o2")
transaction.get().note("create o2")
transaction.commit()
c = o1.child = P()
transaction.get().note(u"set child on o1")
transaction.get().note("set child on o1")
transaction.commit()
o1.child = P()
transaction.get().note(u"replace child on o1")
transaction.get().note("replace child on o1")
transaction.commit()
time.sleep(2)
......@@ -55,12 +54,12 @@ def create_dangling_ref(db):
print(repr(c._p_oid))
o2.child = c
transaction.get().note(u"set child on o2")
transaction.get().note("set child on o2")
transaction.commit()
def main():
fs = FileStorage(u"dangle.fs")
fs = FileStorage("dangle.fs")
db = DB(fs)
create_dangling_ref(db)
db.close()
......
......@@ -30,8 +30,7 @@ Open one more, and we get a warning:
>>> len(handler.records)
1
>>> msg = handler.records[0]
>>> import six
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
>>> print(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 8 open connections with a pool_size of 7
Open 6 more, and we get 6 more warnings:
......@@ -42,7 +41,7 @@ Open 6 more, and we get 6 more warnings:
>>> len(handler.records)
7
>>> msg = handler.records[-1]
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
>>> print(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 14 open connections with a pool_size of 7
Add another, so that it's more than twice the default, and the level
......@@ -54,7 +53,7 @@ rises to critical:
>>> len(handler.records)
8
>>> msg = handler.records[-1]
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
>>> print(msg.name, msg.levelname, msg.getMessage())
ZODB.DB CRITICAL DB.open() has 15 open connections with a pool_size of 7
While it's boring, it's important to verify that the same relationships
......@@ -75,7 +74,7 @@ A warning for opening one more:
>>> len(handler.records)
1
>>> msg = handler.records[0]
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
>>> print(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 3 open connections with a pool_size of 2
More warnings through 4 connections:
......@@ -86,7 +85,7 @@ More warnings through 4 connections:
>>> len(handler.records)
2
>>> msg = handler.records[-1]
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
>>> print(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 4 open connections with a pool_size of 2
And critical for going beyond that:
......@@ -97,7 +96,7 @@ And critical for going beyond that:
>>> len(handler.records)
3
>>> msg = handler.records[-1]
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
>>> print(msg.name, msg.levelname, msg.getMessage())
ZODB.DB CRITICAL DB.open() has 5 open connections with a pool_size of 2
We can change the pool size on the fly:
......@@ -111,7 +110,7 @@ We can change the pool size on the fly:
>>> len(handler.records)
1
>>> msg = handler.records[0]
>>> six.print_(msg.name, msg.levelname, msg.getMessage())
>>> print(msg.name, msg.levelname, msg.getMessage())
ZODB.DB WARNING DB.open() has 7 open connections with a pool_size of 6
Enough of that.
......
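The thresholds exercised above are DB-level settings: a warning once open connections exceed pool_size, critical once they exceed twice pool_size. A minimal sketch, assuming an in-memory storage:

from ZODB import DB
from ZODB.MappingStorage import MappingStorage

db = DB(MappingStorage(), pool_size=7)
conns = [db.open() for _ in range(8)]  # the 8th open logs a WARNING
db.setPoolSize(6)                      # the threshold can change on the fly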
......@@ -22,7 +22,7 @@ import ZODB.utils
@zope.interface.implementer(ZODB.interfaces.IStorageWrapper)
class HexStorage(object):
class HexStorage:
copied_methods = (
'close', 'getName', 'getSize', 'history', 'isReadOnly',
......@@ -139,7 +139,7 @@ class ServerHexStorage(HexStorage):
)
class Transaction(object):
class Transaction:
def __init__(self, store, trans):
self.__store = store
......@@ -155,7 +155,7 @@ class Transaction(object):
return getattr(self.__trans, name)
class ZConfigHex(object):
class ZConfigHex:
_factory = HexStorage
......
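HexStorage illustrates the usual ZODB storage-wrapper pattern: simple methods are copied straight from the wrapped storage, while data-bearing methods transform values on the way through. A stripped-down sketch of the delegation part only (the hex transform itself is omitted; the class and its method list are hypothetical):

import zope.interface
import ZODB.interfaces


@zope.interface.implementer(ZODB.interfaces.IStorageWrapper)
class PassThroughStorage:
    """Hypothetical wrapper delegating everything to `base`."""

    copied_methods = ('close', 'getName', 'getSize', 'history', 'isReadOnly')

    def __init__(self, base):
        self.base = base
        for name in self.copied_methods:
            setattr(self, name, getattr(base, name))

    def load(self, oid, version=''):
        # A real wrapper (like HexStorage) would decode the data here.
        return self.base.load(oid, version)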
......@@ -29,10 +29,9 @@ Then, any log output is collected in the handler:
>>> logging.getLogger('foo.bar').exception('eek')
>>> logging.getLogger('foo.bar').info('blah blah')
>>> import six
>>> for record in handler.records:
... six.print_(record.name, record.levelname)
... six.print_(' ', record.getMessage())
... print(record.name, record.levelname)
... print(' ', record.getMessage())
foo.bar ERROR
eek
foo.bar INFO
......
......@@ -166,12 +166,7 @@ to test that here; this is ugly:
>>> from ZODB.config import getDbSchema
>>> import ZConfig
>>> try:
... from cStringIO import StringIO
... except ImportError:
... # Py3
... from io import StringIO
>>> from io import StringIO
Derive a new `config2` string from the `config` string, specifying a
different database_name:
......
......@@ -40,7 +40,6 @@ between load/open and local invalidations to catch bugs similar to
https://github.com/zopefoundation/ZODB/issues/290 and
https://github.com/zopefoundation/ZEO/issues/166.
"""
from __future__ import print_function
import threading
from random import randint
......@@ -136,13 +135,13 @@ class T2ObjectsInc2Phase:
(i1, i2, p))
class RaceTests(object):
class RaceTests:
# verify storage/Connection for race in between load/open and local
# invalidations.
# https://github.com/zopefoundation/ZEO/issues/166
# https://github.com/zopefoundation/ZODB/issues/290
def check_race_loadopen_vs_local_invalidate(self):
def test_race_loadopen_vs_local_invalidate(self):
return self._check_race_loadopen_vs_local_invalidate(T2ObjectsInc())
@with_high_concurrency
......@@ -240,7 +239,7 @@ class RaceTests(object):
# This test is similar to check_race_loadopen_vs_local_invalidate but does
# not reuse its code because the probability of reproducing the external
# invalidation bug with only 1 mutator + 1 verifier is low.
def check_race_load_vs_external_invalidate(self):
def test_race_load_vs_external_invalidate(self):
return self._check_race_load_vs_external_invalidate(T2ObjectsInc())
@with_high_concurrency
......@@ -286,7 +285,7 @@ class RaceTests(object):
try:
spec.assertStateOK(root)
except AssertionError as e:
msg = "T%s: %s\n" % (tx, e)
msg = "T{}: {}\n".format(tx, e)
msg += _state_details(root)
tg.fail(msg)
......@@ -335,7 +334,7 @@ class RaceTests(object):
# T2ObjectsInc2Phase the invariant will be detected to be broken on the
# next transaction.
@long_test
def check_race_external_invalidate_vs_disconnect(self):
def test_race_external_invalidate_vs_disconnect(self):
return self._check_race_external_invalidate_vs_disconnect(
T2ObjectsInc2Phase())
......@@ -367,7 +366,7 @@ class RaceTests(object):
try:
spec.assertStateOK(root)
except AssertionError as e:
msg = "T%s: %s\n" % (tx, e)
msg = "T{}: {}\n".format(tx, e)
msg += _state_details(root)
tg.fail(msg)
......@@ -440,7 +439,7 @@ def _state_details(root): # -> txt
# serial for all objects
keys = list(sorted(root.keys()))
txt = ''
txt += ' '.join('%s._p_serial: %s' % (k, tid_repr(root[k]._p_serial))
txt += ' '.join('{}._p_serial: {}'.format(k, tid_repr(root[k]._p_serial))
for k in keys)
txt += '\n'
......@@ -463,7 +462,7 @@ def _state_details(root): # -> txt
load_txt += 'None'
else:
_, serial, next_serial = x
load_txt += 'serial: %s next_serial: %s' % (
load_txt += 'serial: {} next_serial: {}'.format(
tid_repr(serial), tid_repr(next_serial))
load_txt += '\n'
return load_txt
......@@ -483,7 +482,7 @@ def _state_details(root): # -> txt
return txt
class TestWorkGroup(object):
class TestWorkGroup:
"""TestWorkGroup represents group of threads that run together to verify
something.
......@@ -561,7 +560,7 @@ class Daemon(threading.Thread):
get intermixed and facilitates the exception analysis.
"""
def __init__(self, **kw):
super(Daemon, self).__init__(**kw)
super().__init__(**kw)
self.daemon = True
if hasattr(self, "_invoke_excepthook"):
# Python 3.8+
......@@ -597,7 +596,7 @@ class Daemon(threading.Thread):
self.run = run
def join(self, *args, **kw):
super(Daemon, self).join(*args, **kw)
super().join(*args, **kw)
if self.is_alive():
raise AssertionError("Thread %s did not stop" % self.name)
......@@ -606,7 +605,7 @@ class Daemon(threading.Thread):
exc_lock = threading.Lock()
class WaitGroup(object):
class WaitGroup:
"""WaitGroup provides service to wait for spawned workers to be done.
- .add() adds workers
......
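A rough sketch of the shape of such a race check, assuming an in-memory storage and the two-object invariant described above (both counters must always be equal); the real tests use the TestWorkGroup/Daemon machinery below rather than bare threads:

import threading

import transaction
from ZODB import DB
from ZODB.MappingStorage import MappingStorage

db = DB(MappingStorage())
with db.transaction() as conn:
    conn.root.i1 = conn.root.i2 = 0

def mutator():
    tm = transaction.TransactionManager()
    conn = db.open(tm)
    for _ in range(100):
        try:
            conn.root.i1 += 1
            conn.root.i2 += 1  # invariant: i1 == i2 in every committed state
            tm.commit()
        except Exception:
            tm.abort()  # e.g. ConflictError; a real test would retry
    conn.close()

def verifier():
    tm = transaction.TransactionManager()
    conn = db.open(tm)
    for _ in range(100):
        tm.begin()  # start a fresh transaction to see a fresh view
        assert conn.root.i1 == conn.root.i2
    tm.abort()
    conn.close()

threads = [threading.Thread(target=f) for f in (mutator, verifier)]
for t in threads:
    t.start()
for t in threads:
    t.join()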
......@@ -15,7 +15,7 @@
"""
class DataManager(object):
class DataManager:
"""Sample data manager
This class provides a trivial data-manager implementation and doc
......@@ -386,7 +386,7 @@ class DataManager(object):
return Rollback(self)
class Rollback(object):
class Rollback:
def __init__(self, dm):
self.dm = dm
......
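For orientation: such a resource participates by joining the active transaction, which then drives it through the two-phase commit calls. A minimal sketch, assuming the sample DataManager above implements the full resource API (abort, commit, tpc_begin, tpc_vote, tpc_finish, sortKey):

import transaction

dm = DataManager()          # the sample data manager from this file
transaction.get().join(dm)  # enlist in the active transaction
transaction.commit()        # drives dm.tpc_begin(), dm.commit(),
                            # dm.tpc_vote(), dm.tpc_finish()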
from __future__ import print_function
import getopt
import os
import string
......@@ -122,7 +120,7 @@ def main(args):
jar.close()
t = time.time()-t
if detailed:
sys.stderr.write("%s\t%s\t%.4f\n" % (j, r, t))
sys.stderr.write("{}\t{}\t{:.4f}\n".format(j, r, t))
sys.stdout.flush()
results[r] = results[r]+t
rt = d = p = v = None # release all references
......@@ -134,7 +132,7 @@ def main(args):
print('-'*24)
for r in 1, 10, 100, 1000:
t = results[r]/nrep
sys.stderr.write("mean:\t%s\t%.4f\t%.4f (s/o)\n" % (r, t, t/r))
sys.stderr.write("mean:\t{}\t{:.4f}\t{:.4f} (s/o)\n".format(r, t, t/r))
db.close()
......
......@@ -24,7 +24,7 @@ import unittest
from ZODB.ActivityMonitor import ActivityMonitor
class FakeConnection(object):
class FakeConnection:
loads = 0
stores = 0
......@@ -101,4 +101,4 @@ class Tests(unittest.TestCase):
def test_suite():
return unittest.makeSuite(Tests)
return unittest.defaultTestLoader.loadTestsFromTestCase(Tests)
......@@ -74,7 +74,7 @@ def test_integration():
>>> a3 = conn3.root()['a']
>>> a3 # doctest: +NORMALIZE_WHITESPACE
<persistent broken ZODB.not.there.Atall instance
'\x00\x00\x00\x00\x00\x00\x00\x01'>
b'\x00\x00\x00\x00\x00\x00\x00\x01'>
>>> a3.__Broken_state__
{'x': 1}
......
......@@ -117,13 +117,13 @@ class DBMethods(CacheTestBase):
old_size = self.db.cacheSize()
self.db.cacheFullSweep()
new_size = self.db.cacheSize()
self.assertTrue(new_size < old_size, "%s < %s" % (old_size, new_size))
self.assertTrue(new_size < old_size, f"{old_size} < {new_size}")
def testMinimize(self):
old_size = self.db.cacheSize()
self.db.cacheMinimize()
new_size = self.db.cacheSize()
self.assertTrue(new_size < old_size, "%s < %s" % (old_size, new_size))
self.assertTrue(new_size < old_size, f"{old_size} < {new_size}")
def testMinimizeTerminates(self):
# This is tricky. cPickleCache had a case where it could get into
......@@ -312,7 +312,7 @@ class LRUCacheTests(CacheTestBase):
self.assertTrue(details['rc'] > 0)
class StubDataManager(object):
class StubDataManager:
def setklassstate(self, object):
pass
......@@ -552,8 +552,8 @@ size correctly:
def test_suite():
s = unittest.makeSuite(DBMethods)
s.addTest(unittest.makeSuite(LRUCacheTests))
s.addTest(unittest.makeSuite(CacheErrors))
s = unittest.defaultTestLoader.loadTestsFromTestCase(DBMethods)
s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(LRUCacheTests))
s.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(CacheErrors))
s.addTest(doctest.DocTestSuite())
return s
......@@ -178,7 +178,7 @@ def multi_atabases():
... ''') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ConfigurationSyntaxError:
ZConfig.ConfigurationSyntaxError:
section names must not be re-used within the same container:'1' (line 9)
>>> ZODB.config.databaseFromString('''
......@@ -204,9 +204,6 @@ def test_suite():
setUp=ZODB.tests.util.setUp,
tearDown=ZODB.tests.util.tearDown,
checker=ZODB.tests.util.checker))
suite.addTest(unittest.makeSuite(ZODBConfigTest))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(
ZODBConfigTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
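The test_suite() rewrites in these files all apply one substitution: unittest.makeSuite() is deprecated in recent Python versions, and the TestLoader API is its supported replacement. The pattern, using a hypothetical SomeTests case:

import unittest

class SomeTests(unittest.TestCase):  # hypothetical test case
    def test_ok(self):
        self.assertTrue(True)

def test_suite():
    # Before: unittest.makeSuite(SomeTests)  (deprecated)
    return unittest.defaultTestLoader.loadTestsFromTestCase(SomeTests)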
......@@ -64,7 +64,6 @@ Now, we'll define a validation function to validate an account:
And a function to apply entries. If the function fails in some unexpected
way, it rolls back all of its changes and prints the error:
>>> import six
>>> def apply_entries(entries):
... savepoint = transaction.savepoint()
... try:
......@@ -75,12 +74,12 @@ way, it rolls back all of its changes and prints the error:
... validate_account(name)
... except ValueError as error:
... entry_savepoint.rollback()
... six.print_('Error', str(error))
... print('Error', str(error))
... else:
... six.print_('Updated', name)
... print('Updated', name)
... except Exception as error:
... savepoint.rollback()
... six.print_('Unexpected exception', error)
... print('Unexpected exception', error)
Now let's try applying some entries:
......