Commit dcbeab41 authored by Jim Fulton

Merge remote-tracking branch 'origin/master' into asyncio

Conflicts:
	.travis.yml
	setup.py
	src/ZEO/ClientStorage.py
	src/ZEO/cache.py
	src/ZEO/tests/testZEO.py
	src/ZEO/zrpc/client.py
	src/ZEO/zrpc/server.py

Also, removed load from the asyncio client implementation, since it
isn't used anymore.
parents da4d4ce1 a7a5fab7
language: python
sudo: false
python:
- 3.4
- 3.5
matrix:
include:
- os: linux
python: 3.4
- os: linux
python: 3.5
- os: osx
language: generic
env: TERRYFY_PYTHON='macpython 3.4'
- os: osx
language: generic
env: TERRYFY_PYTHON='homebrew 3'
before_install:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then git clone https://github.com/MacPython/terryfy; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then source terryfy/travis_tools.sh; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then get_python_environment $TERRYFY_PYTHON venv; fi
- if [[ "$TERRYFY_PYTHON" == "homebrew 3" ]]; then alias pip=`which pip3` ; fi
install:
- pip install -U setuptools
- python bootstrap.py
......
Changelog
=========
4.2.0 (unreleased)
4.2.0 (2016-06-15)
------------------
- Changed loadBefore to operate more like load behaved, especially
with regard to the load lock. This allows ZEO to work with the
upcoming ZODB 5, which uses loadBefore rather than load.
Reimplemented load using loadBefore, thus testing loadBefore
extensively via existing tests.
- Other changes to work with ZODB 5 (as well as ZODB 4)
- Fixed: the ZEO cache loadBefore method failed to utilize current data.
- Drop support for Python 2.6 and 3.2.
4.2.0b1 (2015-06-05)
......
......@@ -11,9 +11,8 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Setup
"""
version = '5.0.0a0'
from setuptools import setup, find_packages
import os
import sys
......
......@@ -50,6 +50,9 @@ import ZEO.cache
logger = logging.getLogger(__name__)
# max signed 64-bit value ~ infinity :) Signed cuz LBTree and TimeStamp
m64 = b'\x7f\xff\xff\xff\xff\xff\xff\xff'
try:
from ZODB.ConflictResolution import ResolvedSerial
except ImportError:
......@@ -499,7 +502,10 @@ class ClientStorage(object):
return self._call('loadSerial', oid, serial)
def load(self, oid, version=''):
return self._server.load(oid)
result = self.loadBefore(oid, m64)
if result is None:
raise POSException.POSKeyError(oid)
return result[:2]
def loadBefore(self, oid, tid):
return self._server.load_before(oid, tid)
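The new load above is just a thin wrapper over loadBefore, as the changelog describes. Outside of ZEO's classes the same idea can be sketched roughly as follows; MAX_TID, MissingOid and load_via_loadBefore are illustrative names for this sketch only, not ZEO API (ZEO itself uses m64 and POSException.POSKeyError):

# Rough sketch (not ZEO's code) of expressing load() via loadBefore(),
# assuming loadBefore(oid, tid) returns (data, start_tid, end_tid) or None.
MAX_TID = b'\x7f\xff\xff\xff\xff\xff\xff\xff'  # "infinity", like m64 above

class MissingOid(KeyError):
    """Stand-in for POSException.POSKeyError in this sketch."""

def load_via_loadBefore(storage, oid):
    # The newest revision committed strictly before "infinity" is the
    # current revision.
    result = storage.loadBefore(oid, MAX_TID)
    if result is None:
        raise MissingOid(oid)
    data, start_tid, _end_tid = result
    return data, start_tid  # load() historically returned (data, serial)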
......@@ -778,6 +784,7 @@ class ClientStorage(object):
self._commit_lock.release()
def lastTransaction(self):
with self._lock:
return self._cache.getLastTid()
def tpc_abort(self, txn, timeout=None):
......@@ -1081,8 +1088,8 @@ def _check_blob_cache_size(blob_dir, target):
logger = logging.getLogger(__name__+'.check_blob_cache')
layout = open(os.path.join(blob_dir, ZODB.blob.LAYOUT_MARKER)
).read().strip()
with open(os.path.join(blob_dir, ZODB.blob.LAYOUT_MARKER)) as layout_file:
layout = layout_file.read().strip()
if not layout == 'zeocache':
logger.critical("Invalid blob directory layout %s", layout)
raise ValueError("Invalid blob directory layout", layout)
......
......@@ -478,7 +478,7 @@ class ClientCache(object):
# @return (data record, serial number, tid), or None if the object is not
# in the cache
# @defreturn 3-tuple: (string, string, string)
def load(self, oid):
def load(self, oid, before_tid=None):
ofs = self.current.get(oid)
if ofs is None:
self._trace(0x20, oid)
......@@ -493,6 +493,9 @@ class ClientCache(object):
assert end_tid == z64, (ofs, self.f.tell(), oid, tid, end_tid)
assert lver == 0, "Versions aren't supported"
if before_tid and tid >= before_tid:
return None
data = read(ldata)
assert len(data) == ldata, (ofs, self.f.tell(), oid, len(data), ldata)
......@@ -532,13 +535,22 @@ class ClientCache(object):
def loadBefore(self, oid, before_tid):
noncurrent_for_oid = self.noncurrent.get(u64(oid))
if noncurrent_for_oid is None:
result = self.load(oid, before_tid)
if result:
return result[0], result[1], None
else:
self._trace(0x24, oid, "", before_tid)
return None
return result
items = noncurrent_for_oid.items(None, u64(before_tid)-1)
if not items:
result = self.load(oid, before_tid)
if result:
return result[0], result[1], None
else:
self._trace(0x24, oid, "", before_tid)
return None
return result
tid, ofs = items[-1]
self.f.seek(ofs)
......@@ -559,8 +571,12 @@ class ClientCache(object):
assert read(8) == oid, (ofs, self.f.tell(), oid)
if end_tid < before_tid:
result = self.load(oid, before_tid)
if result:
return result[0], result[1], None
else:
self._trace(0x24, oid, "", before_tid)
return None
return result
self._n_accesses += 1
self._trace(0x26, oid, "", saved_tid)
......
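The net effect of these cache changes is that loadBefore now falls back to the current record when no suitable non-current record exists, and that fallback only succeeds when the current record's tid is below before_tid. A toy, in-memory illustration of that logic (plain dicts and small integer tids stand in for ClientCache's file-backed records and 8-byte packed tids; load_before here is a hypothetical standalone function, not the real method) mirrors the expected results of test_loadBefore_doesnt_miss_current further below:

# Toy illustration (not ZEO's ClientCache) of the loadBefore fallback.
def load_before(current, noncurrent, oid, before_tid):
    """current: {oid: (data, tid)}; noncurrent: {oid: [(data, start, end), ...]}.

    Returns (data, start_tid, end_tid) or None.
    """
    # Prefer a non-current record whose validity interval covers before_tid.
    for data, start, end in noncurrent.get(oid, ()):
        if start < before_tid <= end:
            return data, start, end
    # Otherwise fall back to the current record, but only if it was
    # committed before before_tid (the new check added to load()).
    if oid in current:
        data, tid = current[oid]
        if tid < before_tid:
            return data, tid, None  # end_tid None marks "still current"
    return None

# Mirrors the final state of test_loadBefore_doesnt_miss_current below,
# with 1, 2, 3 standing in for the packed tids n1, n2, n3.
current = {1: (b'second', 2)}
noncurrent = {1: [(b'first', 1, 2)]}
assert load_before(current, noncurrent, 1, 1) is None
assert load_before(current, noncurrent, 1, 2) == (b'first', 1, 2)
assert load_before(current, noncurrent, 1, 3) == (b'second', 2, None)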
......@@ -598,6 +598,10 @@ class InvqTests(CommonSetupTearDown):
revid2 = self._dostore(oid2)
revid2 = self._dostore(oid2, revid2)
forker.wait_until(
lambda :
perstorage.lastTransaction() == self._storage.lastTransaction())
perstorage.load(oid, '')
perstorage.close()
......@@ -606,12 +610,6 @@ class InvqTests(CommonSetupTearDown):
revid = self._dostore(oid, revid)
perstorage = self.openClientStorage(cache="test")
forker.wait_until(
func=(lambda : perstorage.verify_result == "quick verification"),
timeout=60,
label="perstorage.verify_result to be quick verification")
self.assertEqual(perstorage.verify_result, "quick verification")
self.assertEqual(perstorage.load(oid, ''),
......
......@@ -176,8 +176,9 @@ Start a server:
Open a client storage to it and commit some transactions:
>>> import ZEO, transaction
>>> db = ZEO.DB(addr)
>>> import ZEO, ZODB, transaction
>>> client = ZEO.client(addr)
>>> db = ZODB.DB(client)
>>> conn = db.open()
>>> for i in range(10):
... conn.root().i = i
......@@ -185,19 +186,19 @@ Open a client storage to it and commit a some transactions:
Create an iterator:
>>> it = conn._storage.iterator()
>>> it = client.iterator()
>>> tid1 = it.next().tid
Restart the storage:
>>> stop_server(adminaddr)
>>> wait_disconnected(conn._storage)
>>> wait_disconnected(client)
>>> _ = start_server('<filestorage>\npath fs\n</filestorage>', addr=addr)
>>> wait_connected(conn._storage)
>>> wait_connected(client)
Now, we'll create a second iterator:
>>> it2 = conn._storage.iterator()
>>> it2 = client.iterator()
If we try to advance the first iterator, we should get an error:
......
......@@ -162,10 +162,10 @@ def start_zeo_server(storage_conf=None, zeo_conf=None, port=None, keep=False,
else:
pid = subprocess.Popen(args, env=d, close_fds=True).pid
# We need to wait until the server starts, but not forever.
# 30 seconds is a somewhat arbitrary upper bound. A BDBStorage
# takes a long time to open -- more than 10 seconds on occasion.
for i in range(300):
# We need to wait until the server starts, but not forever. 150
# seconds is a somewhat arbitrary upper bound, but probably helps
# in an address already in use situation.
for i in range(1500):
time.sleep(0.1)
try:
if isinstance(adminaddr, str) and not os.path.exists(adminaddr):
......
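The loop above polls roughly 1500 times at 0.1-second intervals, i.e. up to about 150 seconds. Pulled out as a generic helper, the same pattern might look like the sketch below; wait_for is an illustrative name for this sketch, not part of ZEO.tests.forker:

# Minimal sketch of a poll-until-true helper with a timeout.
import time

def wait_for(condition, timeout=150.0, interval=0.1, label="condition"):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return
        time.sleep(interval)
    raise AssertionError("timed out waiting for %s" % label)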
......@@ -1058,9 +1058,9 @@ def client_asyncore_thread_has_name():
"""
>>> addr, _ = start_server()
>>> db = ZEO.DB(addr)
>>> len([t for t in threading.enumerate()
... if ' zeo client networking thread' in t.getName()])
1
>>> any(t for t in threading.enumerate()
... if ' zeo client networking thread' in t.getName())
True
>>> db.close()
"""
......@@ -1299,9 +1299,9 @@ But, if we abort, we'll get up to date data and we'll see the changes.
>>> sorted(conn2.root.x.items())
[('x', 1), ('y', 1)]
>>> conn2.close()
>>> cs.close()
>>> conn1.close()
"""
......@@ -1392,7 +1392,8 @@ def gracefully_handle_abort_while_storing_many_blobs():
>>> logging.getLogger().addHandler(handler)
>>> addr, _ = start_server(blob_dir='blobs')
>>> c = ZEO.connection(addr, blob_dir='cblobs')
>>> client = ZEO.client(addr, blob_dir='cblobs')
>>> c = ZODB.connection(client)
>>> c.root.x = ZODB.blob.Blob(b'z'*(1<<20))
>>> c.root.y = ZODB.blob.Blob(b'z'*(1<<2))
>>> t = c.transaction_manager.get()
......@@ -1409,7 +1410,7 @@ Now we'll try to use the connection, mainly to wait for everything to
get processed. Before we fixed this by making tpc_finish a synchronous
call to the server, we'd get some sort of error here.
>>> _ = c._storage._call('loadEx', b'\0'*8)
>>> _ = client._call('loadEx', b'\0'*8)
>>> c.close()
......
......@@ -314,7 +314,9 @@ class CacheTests(ZODB.tests.util.TestCase):
# We use large-2 for the same reason we used small-1 above.
expected_len = large-2
self.assertEquals(len(cache), expected_len)
expected_oids = set(list(range(11, 50))+list(range(106, 110))+list(range(200, 305)))
expected_oids = set(list(range(11, 50)) +
list(range(106, 110)) +
list(range(200, 305)))
self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
......@@ -336,6 +338,21 @@ class CacheTests(ZODB.tests.util.TestCase):
self.cache.setLastTid(p64(3))
self.cache.setLastTid(p64(4))
def test_loadBefore_doesnt_miss_current(self):
# Make sure that loadBefore gets current data if there
# isn't non-current data
cache = self.cache
oid = n1
cache.store(oid, n1, None, b'first')
self.assertEqual(cache.loadBefore(oid, n1), None)
self.assertEqual(cache.loadBefore(oid, n2), (b'first', n1, None))
self.cache.invalidate(oid, n2)
cache.store(oid, n2, None, b'second')
self.assertEqual(cache.loadBefore(oid, n1), None)
self.assertEqual(cache.loadBefore(oid, n2), (b'first', n1, n2))
self.assertEqual(cache.loadBefore(oid, n3), (b'second', n2, None))
def kill_does_not_cause_cache_corruption():
r"""
......
......@@ -242,19 +242,24 @@ class Connection(smac.SizedMessageAsyncConnection, object):
# Undone oid info returned by vote.
#
# Z3101 -- checkCurrentSerialInTransaction
#
# Z4 -- checkCurrentSerialInTransaction
# No-longer call load.
# Protocol variables:
# Our preferred protocol.
current_protocol = b"Z3101"
current_protocol = b"Z4"
# If we're a client, an exhaustive list of the server protocols we
# can accept.
servers_we_can_talk_to = [b"Z308", b"Z309", b"Z310", current_protocol]
servers_we_can_talk_to = [b"Z308", b"Z309", b"Z310", b"Z3101",
current_protocol]
# If we're a server, an exhaustive list of the client protocols we
# can accept.
clients_we_can_talk_to = [
b"Z200", b"Z201", b"Z303", b"Z308", b"Z309", b"Z310", current_protocol]
b"Z200", b"Z201", b"Z303", b"Z308", b"Z309", b"Z310", b"Z3101",
current_protocol]
# This is pretty excruciating. Details:
#
......
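These lists drive the handshake's compatibility check: a Z4 client still accepts Z3101 (and older) servers, and a Z4 server still accepts Z3101 (and older) clients. A minimal sketch of that membership check follows; it is simplified, and the real negotiation in ZEO.zrpc.connection also has to agree on which protocol the two peers actually speak:

# Simplified sketch of the compatibility check the protocol lists feed into.
CURRENT_PROTOCOL = b"Z4"
SERVERS_WE_CAN_TALK_TO = [b"Z308", b"Z309", b"Z310", b"Z3101", CURRENT_PROTOCOL]
CLIENTS_WE_CAN_TALK_TO = [b"Z200", b"Z201", b"Z303", b"Z308", b"Z309", b"Z310",
                          b"Z3101", CURRENT_PROTOCOL]

def peer_is_acceptable(peer_protocol, we_are_client=True):
    """True if the handshake should proceed with this peer."""
    acceptable = SERVERS_WE_CAN_TALK_TO if we_are_client else CLIENTS_WE_CAN_TALK_TO
    return peer_protocol in acceptable

assert peer_is_acceptable(b"Z3101")      # an older server is still fine
assert not peer_is_acceptable(b"Z307")   # an unknown protocol is rejected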
......@@ -81,6 +81,7 @@ class Dispatcher(asyncore.dispatcher):
time.sleep(5)
else:
break
self.listen(5)
def writable(self):
......
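The listen(5) added here follows a bind-retry loop whose start is truncated in the hunk above. A self-contained sketch of that bind-retry-then-listen pattern is below; the function name, retry count, and EADDRINUSE handling are assumptions for illustration, not ZEO's exact code:

# Illustrative sketch: retry bind() while the address is busy, then listen().
import errno
import socket
import time

def bind_and_listen(addr, attempts=5, backlog=5):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    for attempt in range(attempts):
        try:
            sock.bind(addr)
        except OSError as exc:
            if exc.errno != errno.EADDRINUSE or attempt == attempts - 1:
                raise
            time.sleep(5)   # the old listener may still be in TIME_WAIT
        else:
            break
    sock.listen(backlog)    # the listen() call the hunk above adds
    return sock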