Commit 52fb8d46 authored by Jason Madden's avatar Jason Madden

Fix many deprecation/resource warnings, and bump up test verbosity on Travis

Some of the tests are being killed because we don't produce any output
when running parallel tests in a layer with -v1.

The amount of warnings made it hard to see real failures.

Also use more recent PyPy, and test on Py 3.6 (which produces the
'async' warnings we'll use to fix #104)
parent a2e12be7
......@@ -5,11 +5,13 @@ matrix:
- os: linux
python: 2.7
- os: linux
python: pypy-5.6.0
python: pypy
- os: linux
python: 3.4
- os: linux
python: 3.5
- os: linux
python: 3.6
- os: linux
python: 3.4
env: ZEO_MTACCEPTOR=1
......@@ -35,6 +37,6 @@ cache:
directories:
- eggs
script:
- bin/test -v1j99
- bin/test -vv -j99
notifications:
email: false
......@@ -122,7 +122,7 @@ class Protocol(base.Protocol):
cr = self.loop.create_unix_connection(
self.protocol_factory, self.addr, ssl=self.ssl)
self._connecting = cr = asyncio.async(cr, loop=self.loop)
self._connecting = cr = asyncio.ensure_future(cr, loop=self.loop)
@cr.add_done_callback
def done_connecting(future):
......
......@@ -191,7 +191,7 @@ class Acceptor(asyncore.dispatcher):
server_hostname=''
)
asyncio.async(cr, loop=loop)
asyncio.ensure_future(cr, loop=loop)
loop.run_forever()
loop.close()
......
......@@ -152,7 +152,7 @@ assert best_protocol_version in ServerProtocol.protocols
def new_connection(loop, addr, socket, zeo_storage, msgpack):
    """Wrap an already-accepted *socket* in a ZEO server protocol.

    Creates a ``ServerProtocol`` for the peer at *addr* and schedules
    ``loop.create_connection`` on *loop* so the transport is attached
    asynchronously.  The coroutine's result is intentionally not awaited
    here; completion is observed via the protocol's callbacks.

    :param loop: the asyncio event loop the connection runs on.
    :param addr: peer address, used by the protocol for logging/identity.
    :param socket: an already-connected socket object.
    :param zeo_storage: the ZEOStorage instance serving this client.
    :param msgpack: whether to use the msgpack wire encoding.
    """
    protocol = ServerProtocol(loop, addr, zeo_storage, msgpack)
    cr = loop.create_connection((lambda: protocol), sock=socket)
    # asyncio.async() was deprecated (and is a syntax error on 3.7+,
    # where ``async`` is a keyword); ensure_future is the replacement.
    asyncio.ensure_future(cr, loop=loop)
class Delay(object):
"""Used to delay response to client for synchronous calls.
......@@ -231,7 +231,7 @@ class Acceptor(object):
else:
cr = loop.create_unix_server(self.factory, addr, ssl=ssl)
f = asyncio.async(cr, loop=loop)
f = asyncio.ensure_future(cr, loop=loop)
server = loop.run_until_complete(f)
self.server = server
......@@ -271,7 +271,7 @@ class Acceptor(object):
self.server.close()
f = asyncio.async(self.server.wait_closed(), loop=loop)
f = asyncio.ensure_future(self.server.wait_closed(), loop=loop)
@f.add_done_callback
def server_closed(f):
# stop the loop when the server closes:
......
......@@ -180,7 +180,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Now we're connected, the cache was initialized, and the
# queued message has been sent:
self.assert_(client.connected.done())
self.assertTrue(client.connected.done())
self.assertEqual(cache.getLastTid(), 'a'*8)
self.assertEqual(self.pop(), (4, False, 'foo', (1, 2)))
......@@ -192,7 +192,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Now we can make async calls:
f2 = self.async('bar', 3, 4)
self.assert_(f2.done() and f2.exception() is None)
self.assertTrue(f2.done() and f2.exception() is None)
self.assertEqual(self.pop(), (0, True, 'bar', (3, 4)))
# Loading objects gets special handling to leverage the cache.
......@@ -289,8 +289,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.assertEqual(f1.exception().args, (exc,))
# Because we reconnected, a new protocol and transport were created:
self.assert_(protocol is not loop.protocol)
self.assert_(transport is not loop.transport)
self.assertTrue(protocol is not loop.protocol)
self.assertTrue(transport is not loop.transport)
protocol = loop.protocol
transport = loop.transport
......@@ -313,7 +313,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Because the server tid matches the cache tid, we're done connecting
wrapper.notify_connected.assert_called_with(client, {'length': 42})
self.assert_(client.connected.done() and not transport.data)
self.assertTrue(client.connected.done() and not transport.data)
self.assertEqual(cache.getLastTid(), b'e'*8)
# Because we were able to update the cache, we didn't have to
......@@ -322,7 +322,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# The close method closes the connection and cache:
client.close()
self.assert_(transport.closed and cache.closed)
self.assertTrue(transport.closed and cache.closed)
# The client doesn't reconnect
self.assertEqual(loop.protocol, protocol)
......@@ -351,7 +351,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.respond(4, dict(length=42))
# Now that verification is done, we're done connecting
self.assert_(client.connected.done() and not transport.data)
self.assertTrue(client.connected.done() and not transport.data)
self.assertEqual(cache.getLastTid(), b'e'*8)
# And the cache has been updated:
......@@ -388,7 +388,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.respond(4, dict(length=42))
# Now that verification is done, we're done connecting
self.assert_(client.connected.done() and not transport.data)
self.assertTrue(client.connected.done() and not transport.data)
self.assertEqual(cache.getLastTid(), b'e'*8)
# But the cache is now empty and we invalidated the database cache
......@@ -402,7 +402,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
addrs, ())
# We haven't connected yet
self.assert_(protocol is None and transport is None)
self.assertTrue(protocol is None and transport is None)
# There are 2 connection attempts outstanding:
self.assertEqual(sorted(loop.connecting), addrs)
......@@ -413,7 +413,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# The failed connection is attempted in the future:
delay, func, args, _ = loop.later.pop(0)
self.assert_(1 <= delay <= 2)
self.assertTrue(1 <= delay <= 2)
func(*args)
self.assertEqual(sorted(loop.connecting), addrs)
......@@ -447,7 +447,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.pop()
self.assertFalse(client.connected.done() or transport.data)
delay, func, args, _ = loop.later.pop(1) # first in later is heartbeat
self.assert_(8 < delay < 10)
self.assertTrue(8 < delay < 10)
self.assertEqual(len(loop.later), 1) # first in later is heartbeat
func(*args) # connect again
self.assertFalse(protocol is loop.protocol)
......@@ -461,8 +461,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.pop(4)
self.assertEqual(self.pop(), (3, False, 'get_info', ()))
self.respond(3, dict(length=42))
self.assert_(client.connected.done() and not transport.data)
self.assert_(client.ready)
self.assertTrue(client.connected.done() and not transport.data)
self.assertTrue(client.ready)
def test_readonly_fallback(self):
addrs = [('1.2.3.4', 8200), ('2.2.3.4', 8200)]
......@@ -493,7 +493,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# At this point, the client is ready and using the protocol,
# and the protocol is read-only:
self.assert_(client.ready)
self.assertTrue(client.ready)
self.assertEqual(client.protocol, protocol)
self.assertEqual(protocol.read_only, True)
connected = client.connected
......@@ -502,7 +502,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.assertEqual(self.pop(), (4, False, 'get_info', ()))
self.respond(4, dict(length=42))
self.assert_(connected.done())
self.assertTrue(connected.done())
# We connect the second address:
loop.connect_connecting(addrs[1])
......@@ -527,7 +527,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
self.assertFalse(client.protocol is protocol)
self.assertEqual(client.protocol, loop.protocol)
self.assertEqual(protocol.closed, True)
self.assert_(client.connected is not connected)
self.assertTrue(client.connected is not connected)
self.assertFalse(client.connected.done())
protocol, transport = loop.protocol, loop.transport
self.assertEqual(protocol.read_only, False)
......@@ -535,8 +535,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# Now, we finish verification
self.respond(2, 'b'*8)
self.respond(3, dict(length=42))
self.assert_(client.ready)
self.assert_(client.connected.done())
self.assertTrue(client.ready)
self.assertTrue(client.connected.done())
def test_invalidations_while_verifying(self):
# While we're verifying, invalidations are ignored
......@@ -553,8 +553,8 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
# We'll disconnect:
protocol.connection_lost(Exception("lost"))
self.assert_(protocol is not loop.protocol)
self.assert_(transport is not loop.transport)
self.assertTrue(protocol is not loop.protocol)
self.assertTrue(transport is not loop.transport)
protocol = loop.protocol
transport = loop.transport
......@@ -606,7 +606,7 @@ class ClientTests(Base, setupstack.TestCase, ClientRunner):
with mock.patch("ZEO.asyncio.client.logger.error") as error:
self.assertFalse(error.called)
protocol.data_received(sized(self.enc + b'200'))
self.assert_(isinstance(error.call_args[0][1], ProtocolError))
self.assertTrue(isinstance(error.call_args[0][1], ProtocolError))
def test_get_peername(self):
......
......@@ -266,7 +266,7 @@ class ConnectionTests(CommonSetupTearDown):
self.startServer(create=0, index=0, ro_svr=1)
# Start a read-only-fallback client
self._storage = self.openClientStorage(read_only_fallback=1)
self.assert_(self._storage.isReadOnly())
self.assertTrue(self._storage.isReadOnly())
# Stores should fail here
self.assertRaises(ReadOnlyError, self._dostore)
self._storage.close()
......@@ -493,7 +493,7 @@ class ConnectionTests(CommonSetupTearDown):
# Wait for all threads to finish
for t in threads:
t.join(60)
self.failIf(t.isAlive(), "%s didn't die" % t.getName())
self.assertFalse(t.isAlive(), "%s didn't die" % t.getName())
finally:
for t in threads:
t.closeclients()
......@@ -949,7 +949,7 @@ class ReconnectionTests(CommonSetupTearDown):
break
except ClientDisconnected:
time.sleep(0.5)
self.assert_(did_a_store)
self.assertTrue(did_a_store)
self._storage.close()
class TimeoutTests(CommonSetupTearDown):
......@@ -971,7 +971,7 @@ class TimeoutTests(CommonSetupTearDown):
):
break
else:
self.assert_(False, 'bad logging')
self.assertTrue(False, 'bad logging')
storage.close()
......@@ -993,7 +993,7 @@ class TimeoutTests(CommonSetupTearDown):
def checkTimeoutAfterVote(self):
self._storage = storage = self.openClientStorage()
# Assert that the zeo cache is empty
self.assert_(not list(storage._cache.contents()))
self.assertTrue(not list(storage._cache.contents()))
# Create the object
oid = storage.new_oid()
obj = MinPO(7)
......@@ -1005,17 +1005,17 @@ class TimeoutTests(CommonSetupTearDown):
storage.tpc_vote(t)
# Now sleep long enough for the storage to time out
time.sleep(3)
self.assert_(
self.assertTrue(
(not storage.is_connected())
or
(storage.connection_count_for_tests > old_connection_count)
)
storage._wait()
self.assert_(storage.is_connected())
self.assertTrue(storage.is_connected())
# We expect finish to fail
self.assertRaises(ClientDisconnected, storage.tpc_finish, t)
# The cache should still be empty
self.assert_(not list(storage._cache.contents()))
self.assertTrue(not list(storage._cache.contents()))
# Load should fail since the object should not be in either the cache
# or the server.
self.assertRaises(KeyError, storage.load, oid, '')
......@@ -1079,10 +1079,10 @@ class MSTThread(threading.Thread):
for c in clients:
# Check that we got serials for all oids
for oid in c.__oids:
testcase.failUnless(oid in c.__serials)
testcase.assertIn(oid, c.__serials)
# Check that we got serials for no other oids
for oid in c.__serials.keys():
testcase.failUnless(oid in c.__oids)
testcase.assertIn(oid, c.__oids)
def closeclients(self):
# Close clients opened by run()
......@@ -1102,7 +1102,8 @@ def short_timeout(self):
# Run IPv6 tests if V6 sockets are supported
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
pass
except (socket.error, AttributeError):
pass
else:
......
......@@ -33,7 +33,7 @@ class IterationTests(object):
# make sure there's no race conditions cleaning out the weak refs
gc.disable()
try:
self.assertEquals(0, len(self._storage._iterator_ids))
self.assertEqual(0, len(self._storage._iterator_ids))
except AssertionError:
# Ok, we have ids. That should also mean that the
# weak dictionary has the same length.
......@@ -50,7 +50,7 @@ class IterationTests(object):
self.assertEqual(len(self._storage._iterators),
len(self._storage._iterator_ids))
self.assertEquals(0, len(self._storage._iterator_ids))
self.assertEqual(0, len(self._storage._iterator_ids))
finally:
if gc_enabled:
gc.enable()
......@@ -63,7 +63,7 @@ class IterationTests(object):
iid = server.iterator_start(None, None)
# None signals the end of iteration.
self.assertEquals(None, server.iterator_next(iid))
self.assertEqual(None, server.iterator_next(iid))
# The server has disposed the iterator already.
self.assertRaises(KeyError, server.iterator_next, iid)
......@@ -80,10 +80,10 @@ class IterationTests(object):
# At this point, a wrapping iterator might not have called the CS
# iterator yet. We'll consume one item to make sure this happens.
six.advance_iterator(iterator)
self.assertEquals(1, len(self._storage._iterator_ids))
self.assertEqual(1, len(self._storage._iterator_ids))
iid = list(self._storage._iterator_ids)[0]
self.assertEquals([], list(iterator))
self.assertEquals(0, len(self._storage._iterator_ids))
self.assertEqual([], list(iterator))
self.assertEqual(0, len(self._storage._iterator_ids))
# The iterator has run through, so the server has already disposed it.
self.assertRaises(KeyError, self._storage._call, 'iterator_next', iid)
......@@ -98,7 +98,7 @@ class IterationTests(object):
# don't see the transaction we just wrote being picked up, because
# iterators only see the state from the point in time when they were
# created.)
self.assert_(list(iterator))
self.assertTrue(list(iterator))
def checkIteratorGCStorageCommitting(self):
# We want the iterator to be garbage-collected, so we don't keep any
......@@ -111,7 +111,7 @@ class IterationTests(object):
self._dostore()
six.advance_iterator(self._storage.iterator())
self.assertEquals(1, len(self._storage._iterator_ids))
self.assertEqual(1, len(self._storage._iterator_ids))
iid = list(self._storage._iterator_ids)[0]
# GC happens at the transaction boundary. After that, both the storage
......@@ -154,7 +154,7 @@ class IterationTests(object):
# as well. I'm calling this directly to avoid accidentally
# calling tpc_abort implicitly.
self._storage.notify_disconnected()
self.assertEquals(0, len(self._storage._iterator_ids))
self.assertEqual(0, len(self._storage._iterator_ids))
def checkIteratorParallel(self):
self._dostore()
......@@ -163,10 +163,10 @@ class IterationTests(object):
iter2 = self._storage.iterator()
txn_info1 = six.advance_iterator(iter1)
txn_info2 = six.advance_iterator(iter2)
self.assertEquals(txn_info1.tid, txn_info2.tid)
self.assertEqual(txn_info1.tid, txn_info2.tid)
txn_info1 = six.advance_iterator(iter1)
txn_info2 = six.advance_iterator(iter2)
self.assertEquals(txn_info1.tid, txn_info2.tid)
self.assertEqual(txn_info1.tid, txn_info2.tid)
self.assertRaises(StopIteration, next, iter1)
self.assertRaises(StopIteration, next, iter2)
......
......@@ -119,7 +119,7 @@ class ThreadTests(object):
for t in threads:
t.join(30)
for i in threads:
self.failUnless(not t.isAlive())
self.assertFalse(t.isAlive())
# Helper for checkMTStores
def mtstorehelper(self):
......
......@@ -122,6 +122,9 @@ First, fake out the connection manager so we can make a connection:
... next = None
...
... return oid, oid*8, 'data ' + oid, next
...
... def close(self):
... pass
>>> client = ZEO.client(
... '', wait=False, _client_factory=Client)
......@@ -138,6 +141,7 @@ Now we'll have our way with its private _server attr:
2
3
4
>>> client.close()
"""
......
......@@ -51,6 +51,7 @@ class TransBufTests(unittest.TestCase):
for i, (oid, d, resolved) in enumerate(tbuf):
self.assertEqual((oid, d), data[i][0])
self.assertEqual(resolved, data[i][1])
tbuf.close()
def test_suite():
return unittest.makeSuite(TransBufTests, 'check')
......@@ -221,15 +221,15 @@ class MiscZEOTests(object):
# available right after successful connection, this is required now.
addr = self._storage._addr
storage2 = ClientStorage(addr, **self._client_options())
self.assert_(storage2.is_connected())
self.assertEquals(ZODB.utils.z64, storage2.lastTransaction())
self.assertTrue(storage2.is_connected())
self.assertEqual(ZODB.utils.z64, storage2.lastTransaction())
storage2.close()
self._dostore()
storage3 = ClientStorage(addr, **self._client_options())
self.assert_(storage3.is_connected())
self.assertEquals(8, len(storage3.lastTransaction()))
self.assertNotEquals(ZODB.utils.z64, storage3.lastTransaction())
self.assertTrue(storage3.is_connected())
self.assertEqual(8, len(storage3.lastTransaction()))
self.assertNotEqual(ZODB.utils.z64, storage3.lastTransaction())
storage3.close()
class GenericTestBase(
......@@ -422,12 +422,12 @@ class FileStorageTests(FullGenericTests):
# ClientStorage itself doesn't implement IStorageIteration, but the
# FileStorage on the other end does, and thus the ClientStorage
# instance that is connected to it reflects this.
self.failIf(ZODB.interfaces.IStorageIteration.implementedBy(
self.assertFalse(ZODB.interfaces.IStorageIteration.implementedBy(
ZEO.ClientStorage.ClientStorage))
self.failUnless(ZODB.interfaces.IStorageIteration.providedBy(
self.assertTrue(ZODB.interfaces.IStorageIteration.providedBy(
self._storage))
# This is communicated using ClientStorage's _info object:
self.assertEquals(self._expected_interfaces,
self.assertEqual(self._expected_interfaces,
self._storage._info['interfaces']
)
......@@ -552,7 +552,7 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
log = str(handler)
handler.uninstall()
self.assert_("Client loop stopped unexpectedly" in log)
self.assertTrue("Client loop stopped unexpectedly" in log)
def checkExceptionLogsAtError(self):
# Test the exceptions are logged at error
......@@ -570,7 +570,7 @@ class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
self.assertRaises(ZODB.POSException.POSKeyError,
self._storage.history, None, None)
handler.uninstall()
self.assertEquals(str(handler), '')
self.assertEqual(str(handler), '')
def checkConnectionInvalidationOnReconnect(self):
......@@ -639,7 +639,7 @@ class CommonBlobTests(object):
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
self.assert_(os.path.exists(tfname))
self.assertTrue(os.path.exists(tfname))
t = TransactionMetaData()
try:
......@@ -650,9 +650,9 @@ class CommonBlobTests(object):
except:
self._storage.tpc_abort(t)
raise
self.assert_(not os.path.exists(tfname))
self.assertTrue(not os.path.exists(tfname))
filename = self._storage.fshelper.getBlobFilename(oid, revid)
self.assert_(os.path.exists(filename))
self.assertTrue(os.path.exists(filename))
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
......@@ -693,11 +693,11 @@ class CommonBlobTests(object):
filename = self._storage.loadBlob(oid, serial)
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
self.assert_(not(os.stat(filename).st_mode & stat.S_IWRITE))
self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
self.assertTrue(not(os.stat(filename).st_mode & stat.S_IWRITE))
self.assertTrue((os.stat(filename).st_mode & stat.S_IREAD))
def checkTemporaryDirectory(self):
self.assertEquals(os.path.join(self.blob_cache_dir, 'tmp'),
self.assertEqual(os.path.join(self.blob_cache_dir, 'tmp'),
self._storage.temporaryDirectory())
def checkTransactionBufferCleanup(self):
......@@ -726,14 +726,14 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
somedata.write(("%s\n" % i).encode('ascii'))
def check_data(path):
self.assert_(os.path.exists(path))
f = open(path, 'rb')
self.assertTrue(os.path.exists(path))
somedata.seek(0)
d1 = d2 = 1
while d1 or d2:
d1 = f.read(8096)
d2 = somedata.read(8096)
self.assertEqual(d1, d2)
with open(path, 'rb') as f:
while d1 or d2:
d1 = f.read(8096)
d2 = somedata.read(8096)
self.assertEqual(d1, d2)
somedata.seek(0)
blob = Blob()
......@@ -743,7 +743,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
self.assert_(os.path.exists(tfname))
self.assertTrue(os.path.exists(tfname))
t = TransactionMetaData()
try:
......@@ -756,7 +756,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
raise
# The uncommitted data file should have been removed
self.assert_(not os.path.exists(tfname))
self.assertTrue(not os.path.exists(tfname))
# The file should be in the cache ...
filename = self._storage.fshelper.getBlobFilename(oid, revid)
......@@ -768,7 +768,7 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
ZODB.blob.BushyLayout().getBlobFilePath(oid, revid),
)
self.assert_(server_filename.startswith(self.blobdir))
self.assertTrue(server_filename.startswith(self.blobdir))
check_data(server_filename)
# If we remove it from the cache and call loadBlob, it should
......@@ -1203,7 +1203,7 @@ def runzeo_without_configfile():
... ''' % sys.path)
>>> import subprocess, re
>>> print(re.sub(b'\d\d+|[:]', b'', subprocess.Popen(
>>> print(re.sub(br'\d\d+|[:]', b'', subprocess.Popen(
... [sys.executable, 'runzeo', '-a:0', '-ft', '--test'],
... stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
... ).stdout.read()).decode('ascii'))
......
......@@ -149,6 +149,7 @@ We can start another client and get the storage lock.
>>> zs1.tpc_finish('1').set_sender(0, zs1.connection)
>>> fs.close()
>>> server.close()
"""
def errors_in_vote_should_clear_lock():
......@@ -408,6 +409,7 @@ If clients disconnect while waiting, they will be dequeued:
>>> logging.getLogger('ZEO').setLevel(logging.NOTSET)
>>> logging.getLogger('ZEO').removeHandler(handler)
>>> server.close()
"""
def lock_sanity_check():
......@@ -489,6 +491,8 @@ ZEOStorage as closed and see if trying to get a lock cleans it up:
>>> logging.getLogger('ZEO').setLevel(logging.NOTSET)
>>> logging.getLogger('ZEO').removeHandler(handler)
>>> server.close()
"""
def test_suite():
......@@ -507,4 +511,3 @@ def test_suite():
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
......@@ -141,12 +141,12 @@ class CacheTests(ZODB.tests.util.TestCase):
for i in range(50):
n = p64(i)
cache.store(n, n, None, data[i])
self.assertEquals(len(cache), i + 1)
self.assertEqual(len(cache), i + 1)
# The cache is now almost full. The next insert
# should delete some objects.
n = p64(50)
cache.store(n, n, None, data[51])
self.assert_(len(cache) < 51)
self.assertTrue(len(cache) < 51)
# TODO: Need to make sure eviction of non-current data
# are handled correctly.
......@@ -174,41 +174,44 @@ class CacheTests(ZODB.tests.util.TestCase):
eq(dict([(k, dict(v)) for (k, v) in copy.noncurrent.items()]),
dict([(k, dict(v)) for (k, v) in self.cache.noncurrent.items()]),
)
copy.close()
def testCurrentObjectLargerThanCache(self):
if self.cache.path:
os.remove(self.cache.path)
self.cache.close()
self.cache = ZEO.cache.ClientCache(size=50)
# We store an object that is a bit larger than the cache can handle.
self.cache.store(n1, n2, None, "x"*64)
# We can see that it was not stored.
self.assertEquals(None, self.cache.load(n1))
self.assertEqual(None, self.cache.load(n1))
# If an object cannot be stored in the cache, it must not be
# recorded as current.
self.assert_(n1 not in self.cache.current)
self.assertTrue(n1 not in self.cache.current)
# Regression test: invalidation must still work.
self.cache.invalidate(n1, n2)
def testOldObjectLargerThanCache(self):
if self.cache.path:
os.remove(self.cache.path)
self.cache.close()
cache = ZEO.cache.ClientCache(size=50)
# We store an object that is a bit larger than the cache can handle.
cache.store(n1, n2, n3, "x"*64)
# We can see that it was not stored.
self.assertEquals(None, cache.load(n1))
self.assertEqual(None, cache.load(n1))
# If an object cannot be stored in the cache, it must not be
# recorded as non-current.
self.assert_(1 not in cache.noncurrent)
self.assertTrue(1 not in cache.noncurrent)
def testVeryLargeCaches(self):
cache = ZEO.cache.ClientCache('cache', size=(1<<32)+(1<<20))
cache.store(n1, n2, None, b"x")
cache.close()
cache = ZEO.cache.ClientCache('cache', size=(1<<33)+(1<<20))
self.assertEquals(cache.load(n1), (b'x', n2))
self.assertEqual(cache.load(n1), (b'x', n2))
cache.close()
def testConversionOfLargeFreeBlocks(self):
......@@ -225,8 +228,8 @@ class CacheTests(ZODB.tests.util.TestCase):
cache.close()
with open('cache', 'rb') as f:
f.seek(12)
self.assertEquals(f.read(1), b'f')
self.assertEquals(struct.unpack(">I", f.read(4))[0],
self.assertEqual(f.read(1), b'f')
self.assertEqual(struct.unpack(">I", f.read(4))[0],
ZEO.cache.max_block_size)
if not sys.platform.startswith('linux'):
......@@ -261,8 +264,8 @@ class CacheTests(ZODB.tests.util.TestCase):
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+100*recsize+extra)
for i in range(100):
cache.store(p64(i), n1, None, data)
self.assertEquals(len(cache), 100)
self.assertEquals(os.path.getsize(
self.assertEqual(len(cache), 100)
self.assertEqual(os.path.getsize(
'cache'), ZEO.cache.ZEC_HEADER_SIZE+100*recsize+extra)
# Now make it smaller
......@@ -270,10 +273,10 @@ class CacheTests(ZODB.tests.util.TestCase):
small = 50
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
self.assertEquals(len(cache), small)
self.assertEquals(os.path.getsize(
self.assertEqual(len(cache), small)
self.assertEqual(os.path.getsize(
'cache'), ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
set(range(small)))
for i in range(100, 110):
cache.store(p64(i), n1, None, data)
......@@ -282,9 +285,9 @@ class CacheTests(ZODB.tests.util.TestCase):
# evicted because of the optimization to assure that we
# always get a free block after a new allocated block.
expected_len = small - 1
self.assertEquals(len(cache), expected_len)
self.assertEqual(len(cache), expected_len)
expected_oids = set(list(range(11, 50))+list(range(100, 110)))
self.assertEquals(
self.assertEqual(
set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
......@@ -292,8 +295,8 @@ class CacheTests(ZODB.tests.util.TestCase):
cache.close()
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+small*recsize+extra)
self.assertEquals(len(cache), expected_len)
self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
self.assertEqual(len(cache), expected_len)
self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
# Now make it bigger
......@@ -301,10 +304,10 @@ class CacheTests(ZODB.tests.util.TestCase):
large = 150
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
self.assertEquals(len(cache), expected_len)
self.assertEquals(os.path.getsize(
self.assertEqual(len(cache), expected_len)
self.assertEqual(os.path.getsize(
'cache'), ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
......@@ -313,19 +316,19 @@ class CacheTests(ZODB.tests.util.TestCase):
# We use large-2 for the same reason we used small-1 above.
expected_len = large-2
self.assertEquals(len(cache), expected_len)
self.assertEqual(len(cache), expected_len)
expected_oids = set(list(range(11, 50)) +
list(range(106, 110)) +
list(range(200, 305)))
self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
# Make sure we can reopen with same size
cache.close()
cache = ZEO.cache.ClientCache(
'cache', size=ZEO.cache.ZEC_HEADER_SIZE+large*recsize+extra)
self.assertEquals(len(cache), expected_len)
self.assertEquals(set(u64(oid) for (oid, tid) in cache.contents()),
self.assertEqual(len(cache), expected_len)
self.assertEqual(set(u64(oid) for (oid, tid) in cache.contents()),
expected_oids)
# Cleanup
......
......@@ -118,7 +118,7 @@ class SSLConfigTest(ZEOConfigTestBase):
stop()
@unittest.skipIf(forker.ZEO4_SERVER, "ZEO4 servers don't support SSL")
@mock.patch(('asyncio' if PY3 else 'trollius') + '.async')
@mock.patch(('asyncio' if PY3 else 'trollius') + '.ensure_future')
@mock.patch(('asyncio' if PY3 else 'trollius') + '.set_event_loop')
@mock.patch(('asyncio' if PY3 else 'trollius') + '.new_event_loop')
@mock.patch('ZEO.asyncio.client.new_event_loop')
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment