Commit aec09788 authored by Tres Seaver's avatar Tres Seaver

Suppress Py3k resource warnings files from 'open()'.

parent 56319ef6
......@@ -231,7 +231,8 @@ This tests tries to provoke this bug by:
... print(record.name, record.levelname, end=' ')
... print(handler.format(record))
... if bad:
... print(open('server-%s.log' % addr[1]).read())
... with open('server-%s.log' % addr[1]) as f:
... print(f.read())
... #else:
... # logging.getLogger('ZEO').debug('GOOD %s' % c)
... db.close()
......
......@@ -625,18 +625,17 @@ class CommonBlobTests:
blob_cache_dir = 'blob_cache'
def checkStoreBlob(self):
from ZODB.utils import oid_repr, tid_repr
from ZODB.blob import Blob, BLOB_SUFFIX
from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
handle_serials
import transaction
from ZODB.blob import Blob
from ZODB.tests.StorageTestBase import handle_serials
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.StorageTestBase import zodb_pickle
somedata = b'a' * 10
blob = Blob()
bd_fh = blob.open('w')
with blob.open('w') as bd_fh:
bd_fh.write(somedata)
bd_fh.close()
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
......@@ -655,7 +654,8 @@ class CommonBlobTests:
self.assert_(not os.path.exists(tfname))
filename = self._storage.fshelper.getBlobFilename(oid, revid)
self.assert_(os.path.exists(filename))
self.assertEqual(somedata, open(filename, 'rb').read())
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
def checkStoreBlob_wrong_partition(self):
os_rename = os.rename
......@@ -676,9 +676,8 @@ class CommonBlobTests:
somedata = b'a' * 10
blob = Blob()
bd_fh = blob.open('w')
with blob.open('w') as bd_fh:
bd_fh.write(somedata)
bd_fh.close()
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
......@@ -695,7 +694,8 @@ class CommonBlobTests:
raise
filename = self._storage.loadBlob(oid, serial)
self.assertEquals(somedata, open(filename, 'rb').read())
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
self.assert_(not(os.stat(filename).st_mode & stat.S_IWRITE))
self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
......@@ -718,20 +718,30 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
"""ZEO backed by a BlobStorage-adapted FileStorage."""
def checkStoreAndLoadBlob(self):
from ZODB.utils import oid_repr, tid_repr
from ZODB.blob import Blob, BLOB_SUFFIX
from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
handle_serials
import transaction
from ZODB.blob import Blob
from ZODB.tests.StorageTestBase import handle_serials
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.StorageTestBase import zodb_pickle
somedata_path = os.path.join(self.blob_cache_dir, 'somedata')
somedata = open(somedata_path, 'w+b')
with open(somedata_path, 'w+b') as somedata:
for i in range(1000000):
somedata.write(("%s\n" % i).encode('ascii'))
def check_data(path):
self.assert_(os.path.exists(path))
f = open(path, 'rb')
somedata.seek(0)
d1 = d2 = 1
while d1 or d2:
d1 = f.read(8096)
d2 = somedata.read(8096)
self.assertEqual(d1, d2)
somedata.seek(0)
blob = Blob()
bd_fh = blob.open('w')
with blob.open('w') as bd_fh:
ZODB.utils.cp(somedata, bd_fh)
bd_fh.close()
tfname = bd_fh.name
......@@ -753,16 +763,6 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
# The uncommitted data file should have been removed
self.assert_(not os.path.exists(tfname))
def check_data(path):
self.assert_(os.path.exists(path))
f = open(path, 'rb')
somedata.seek(0)
d1 = d2 = 1
while d1 or d2:
d1 = f.read(8096)
d2 = somedata.read(8096)
self.assertEqual(d1, d2)
# The file should be in the cache ...
filename = self._storage.fshelper.getBlobFilename(oid, revid)
check_data(filename)
......@@ -1201,7 +1201,8 @@ def dont_log_poskeyerrors_on_server():
>>> cs.close()
>>> stop_server(admin)
>>> 'POSKeyError' in open('server-%s.log' % addr[1]).read()
>>> with open('server-%s.log' % addr[1]) as f:
... 'POSKeyError' in f.read()
False
"""
......@@ -1239,7 +1240,8 @@ def client_asyncore_thread_has_name():
def runzeo_without_configfile():
"""
>>> r = open('runzeo', 'w').write('''
>>> with open('runzeo', 'w') as r:
... r.write('''
... import sys
... sys.path[:] = %r
... import ZEO.runzeo
......@@ -1342,7 +1344,8 @@ constructor.
>>> db.close()
>>> @wait_until
... def check_for_test_label_1():
... for line in open('server-%s.log' % addr[1]):
...     with open('server-%s.log' % addr[1]) as f:
... for line in f:
... if 'test-label-1' in line:
... print(line.split()[1:4])
... return True
......@@ -1362,6 +1365,7 @@ You can specify the client label via a configuration file as well:
>>> db.close()
>>> @wait_until
... def check_for_test_label_2():
... with open('server-%s.log' % addr[1]) as f:
...         for line in f:
... if 'test-label-2' in line:
... print(line.split()[1:4])
......@@ -1451,7 +1455,8 @@ sys.path[:] = %(path)r
"""
def generate_script(name, src):
open(name, 'w').write(script_template % dict(
with open(name, 'w') as f:
f.write(script_template % dict(
exe=sys.executable,
path=sys.path,
src=src,
......@@ -1460,7 +1465,8 @@ def generate_script(name, src):
def runzeo_logrotate_on_sigusr2():
"""
>>> port = get_port()
>>> r = open('c', 'w').write('''
>>> with open('c', 'w') as r:
... r.write('''
... <zeo>
... address %s
... </zeo>
......@@ -1478,19 +1484,23 @@ def runzeo_logrotate_on_sigusr2():
... ''')
>>> import subprocess, signal
>>> p = subprocess.Popen([sys.executable, 's', '-Cc'], close_fds=True)
>>> wait_until('started',
... lambda : os.path.exists('l') and ('listening on' in open('l').read())
>>> def _log_contains(text):
...     with open('l') as f:
...         return text in f.read()
>>> wait_until('started',
...     lambda : os.path.exists('l') and _log_contains('listening on')
... )
>>> oldlog = open('l').read()
>>> with open('l') as f:
...     oldlog = f.read()
>>> os.rename('l', 'o')
>>> os.kill(p.pid, signal.SIGUSR2)
>>> wait_until('new file', lambda : os.path.exists('l'))
>>> s = ClientStorage(port)
>>> s.close()
>>> wait_until('See logging', lambda : ('Log files ' in open('l').read()))
>>> open('o').read() == oldlog # No new data in old log
>>> def _log_contains(text):
...     with open('l') as f:
...         return text in f.read()
>>> wait_until('See logging', lambda : _log_contains('Log files '))
>>> with open('o') as f:
... f.read() == oldlog # No new data in old log
True
# Cleanup:
......
......@@ -46,9 +46,8 @@ class TestZEOOptions(TestZDOptions):
def setUp(self):
self.tempfilename = tempfile.mktemp()
f = open(self.tempfilename, "w")
with open(self.tempfilename, "w") as f:
f.write(self.configdata)
f.close()
def tearDown(self):
try:
......
......@@ -159,11 +159,10 @@ class CacheTests(ZODB.tests.util.TestCase):
path = tempfile.mktemp()
# Copy data from self.cache into path, reaching into the cache
# guts to make the copy.
dst = open(path, "wb+")
with open(path, "wb+") as dst:
src = self.cache.f
src.seek(0)
dst.write(src.read(self.cache.maxsize))
dst.close()
copy = ZEO.cache.ClientCache(path)
# Verify that internals of both objects are the same.
......@@ -213,24 +212,22 @@ class CacheTests(ZODB.tests.util.TestCase):
cache.close()
def testConversionOfLargeFreeBlocks(self):
f = open('cache', 'wb')
with open('cache', 'wb') as f:
f.write(ZEO.cache.magic+
b'\0'*8 +
b'f'+struct.pack(">I", (1<<32)-12)
)
f.seek((1<<32)-1)
f.write(b'x')
f.close()
cache = ZEO.cache.ClientCache('cache', size=1<<32)
cache.close()
cache = ZEO.cache.ClientCache('cache', size=1<<32)
cache.close()
f = open('cache', 'rb')
with open('cache', 'rb') as f:
f.seek(12)
self.assertEquals(f.read(1), b'f')
self.assertEquals(struct.unpack(">I", f.read(4))[0],
ZEO.cache.max_block_size)
f.close()
if not sys.platform.startswith('linux'):
# On platforms without sparse files, these tests are just way
......@@ -347,8 +344,8 @@ isn't corrupted. To see this, we'll write a little script that
writes records to a cache file repeatedly.
>>> import os, random, sys, time
>>> with open('t', 'w') as file:
... _ = file.write('''
>>> with open('t', 'w') as f:
... _ = f.write('''
... import os, random, sys, time
... try:
... import thread
......@@ -1088,8 +1085,8 @@ def rename_bad_cache_file():
"""
An attempt to open a bad cache file will cause it to be dropped and recreated.
>>> with open('cache', 'w') as file:
... _ = file.write('x'*100)
>>> with open('cache', 'w') as f:
... _ = f.write('x'*100)
>>> import logging, sys
>>> handler = logging.StreamHandler(sys.stdout)
>>> logging.getLogger().addHandler(handler)
......@@ -1104,14 +1101,13 @@ An attempt to open a bad cache file will cause it to be dropped and recreated.
>>> cache.store(p64(1), p64(1), None, b'data')
>>> cache.close()
>>> f = open('cache')
>>> _ = f.seek(0, 2)
>>> print(f.tell())
>>> with open('cache') as f:
... _ = f.seek(0, 2)
... print(f.tell())
1000
>>> f.close()
>>> with open('cache', 'w') as file:
... _ = file.write('x'*200)
>>> with open('cache', 'w') as f:
... _ = f.write('x'*200)
>>> cache = ZEO.cache.ClientCache('cache', 1000) # doctest: +ELLIPSIS
Removing bad cache file: 'cache' (prev bad exists).
Traceback (most recent call last):
......@@ -1120,17 +1116,15 @@ An attempt to open a bad cache file will cause it to be dropped and recreated.
>>> cache.store(p64(1), p64(1), None, b'data')
>>> cache.close()
>>> f = open('cache')
>>> _ = f.seek(0, 2)
>>> print(f.tell())
>>> with open('cache') as f:
... _ = f.seek(0, 2)
... print(f.tell())
1000
>>> f.close()
>>> f = open('cache.bad')
>>> _ = f.seek(0, 2)
>>> print(f.tell())
>>> with open('cache.bad') as f:
... _ = f.seek(0, 2)
... print(f.tell())
100
>>> f.close()
>>> logging.getLogger().removeHandler(handler)
>>> logging.getLogger().setLevel(old_level)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment