Commit aec09788 authored by Tres Seaver

Suppress Py3k resource warnings for files from 'open()'.

parent 56319ef6
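
Every hunk below applies the same transformation: a bare `open(...)` whose file object was left for the garbage collector to close is rewritten as a `with` block, so the file is closed deterministically. Under Python 3 the lazy close is reported as `ResourceWarning: unclosed file ...`, which is the noise this commit silences. A minimal sketch of the before/after shape (Python 3 only; `example.log` is a throwaway name used for illustration, not a file touched by this commit):

    import warnings

    # Surface ResourceWarning, which Python 3 ignores by default.
    warnings.simplefilter('always', ResourceWarning)

    # A throwaway file so the sketch is self-contained.
    with open('example.log', 'w') as f:
        f.write('one line\n')

    # Before: the file object is closed only when it is garbage-collected;
    # CPython reports that as "ResourceWarning: unclosed file ...".
    data = open('example.log').read()

    # After (the pattern used throughout this commit): the with-block closes
    # the file deterministically, so no warning is emitted.
    with open('example.log') as f:
        data = f.read()

Running a test suite with `python -W always::ResourceWarning` (or `-W error::ResourceWarning`) makes the difference visible.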
@@ -231,7 +231,8 @@ This tests tries to provoke this bug by:
 ... print(record.name, record.levelname, end=' ')
 ... print(handler.format(record))
 ... if bad:
-... print(open('server-%s.log' % addr[1]).read())
+... with open('server-%s.log' % addr[1]) as f:
+...     print(f.read())
 ... #else:
 ... # logging.getLogger('ZEO').debug('GOOD %s' % c)
 ... db.close()
@@ -625,18 +625,17 @@ class CommonBlobTests:
     blob_cache_dir = 'blob_cache'

     def checkStoreBlob(self):
-        from ZODB.utils import oid_repr, tid_repr
-        from ZODB.blob import Blob, BLOB_SUFFIX
-        from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
-             handle_serials
         import transaction
+        from ZODB.blob import Blob
+        from ZODB.tests.StorageTestBase import handle_serials
+        from ZODB.tests.StorageTestBase import ZERO
+        from ZODB.tests.StorageTestBase import zodb_pickle

         somedata = b'a' * 10
         blob = Blob()
-        bd_fh = blob.open('w')
-        bd_fh.write(somedata)
-        bd_fh.close()
+        with blob.open('w') as bd_fh:
+            bd_fh.write(somedata)
         tfname = bd_fh.name
         oid = self._storage.new_oid()
         data = zodb_pickle(blob)
@@ -655,7 +654,8 @@ class CommonBlobTests:
         self.assert_(not os.path.exists(tfname))
         filename = self._storage.fshelper.getBlobFilename(oid, revid)
         self.assert_(os.path.exists(filename))
-        self.assertEqual(somedata, open(filename, 'rb').read())
+        with open(filename, 'rb') as f:
+            self.assertEqual(somedata, f.read())

     def checkStoreBlob_wrong_partition(self):
         os_rename = os.rename
@@ -676,9 +676,8 @@ class CommonBlobTests:
         somedata = b'a' * 10
         blob = Blob()
-        bd_fh = blob.open('w')
-        bd_fh.write(somedata)
-        bd_fh.close()
+        with blob.open('w') as bd_fh:
+            bd_fh.write(somedata)
         tfname = bd_fh.name
         oid = self._storage.new_oid()
         data = zodb_pickle(blob)
@@ -695,7 +694,8 @@ class CommonBlobTests:
                 raise

         filename = self._storage.loadBlob(oid, serial)
-        self.assertEquals(somedata, open(filename, 'rb').read())
+        with open(filename, 'rb') as f:
+            self.assertEqual(somedata, f.read())
         self.assert_(not(os.stat(filename).st_mode & stat.S_IWRITE))
         self.assert_((os.stat(filename).st_mode & stat.S_IREAD))
@@ -718,20 +718,30 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
     """ZEO backed by a BlobStorage-adapted FileStorage."""

     def checkStoreAndLoadBlob(self):
-        from ZODB.utils import oid_repr, tid_repr
-        from ZODB.blob import Blob, BLOB_SUFFIX
-        from ZODB.tests.StorageTestBase import zodb_pickle, ZERO, \
-             handle_serials
         import transaction
+        from ZODB.blob import Blob
+        from ZODB.tests.StorageTestBase import handle_serials
+        from ZODB.tests.StorageTestBase import ZERO
+        from ZODB.tests.StorageTestBase import zodb_pickle

         somedata_path = os.path.join(self.blob_cache_dir, 'somedata')
-        somedata = open(somedata_path, 'w+b')
-        for i in range(1000000):
-            somedata.write(("%s\n" % i).encode('ascii'))
+        with open(somedata_path, 'w+b') as somedata:
+            for i in range(1000000):
+                somedata.write(("%s\n" % i).encode('ascii'))

-        somedata.seek(0)
-        blob = Blob()
-        bd_fh = blob.open('w')
-        ZODB.utils.cp(somedata, bd_fh)
-        bd_fh.close()
-        tfname = bd_fh.name
+            def check_data(path):
+                self.assert_(os.path.exists(path))
+                f = open(path, 'rb')
+                somedata.seek(0)
+                d1 = d2 = 1
+                while d1 or d2:
+                    d1 = f.read(8096)
+                    d2 = somedata.read(8096)
+                    self.assertEqual(d1, d2)
+            somedata.seek(0)
+
+            blob = Blob()
+            with blob.open('w') as bd_fh:
+                ZODB.utils.cp(somedata, bd_fh)
+                bd_fh.close()
+            tfname = bd_fh.name
@@ -753,16 +763,6 @@ class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
         # The uncommitted data file should have been removed
         self.assert_(not os.path.exists(tfname))

-        def check_data(path):
-            self.assert_(os.path.exists(path))
-            f = open(path, 'rb')
-            somedata.seek(0)
-            d1 = d2 = 1
-            while d1 or d2:
-                d1 = f.read(8096)
-                d2 = somedata.read(8096)
-                self.assertEqual(d1, d2)
-
         # The file should be in the cache ...
         filename = self._storage.fshelper.getBlobFilename(oid, revid)
         check_data(filename)
@@ -1201,7 +1201,8 @@ def dont_log_poskeyerrors_on_server():
    >>> cs.close()
    >>> stop_server(admin)

-   >>> 'POSKeyError' in open('server-%s.log' % addr[1]).read()
+   >>> with open('server-%s.log' % addr[1]) as f:
+   ...     'POSKeyError' in f.read()
    False

    """
@@ -1239,7 +1240,8 @@ def client_asyncore_thread_has_name():

 def runzeo_without_configfile():
     """
-    >>> r = open('runzeo', 'w').write('''
+    >>> with open('runzeo', 'w') as r:
+    ...     r.write('''
     ... import sys
     ... sys.path[:] = %r
     ... import ZEO.runzeo
@@ -1342,7 +1344,8 @@ constructor.
    >>> db.close()
    >>> @wait_until
    ... def check_for_test_label_1():
-   ...     for line in open('server-%s.log' % addr[1]):
+   ...     with open('server-%s.log' % addr[1]) as f:
+   ...         for line in f:
    ...         if 'test-label-1' in line:
    ...             print(line.split()[1:4])
    ...             return True
@@ -1362,6 +1365,7 @@ You can specify the client label via a configuration file as well:
    >>> db.close()
    >>> @wait_until
    ... def check_for_test_label_2():
-   ...     for line in open('server-%s.log' % addr[1]):
+   ...     with open('server-%s.log' % addr[1]) as f:
+   ...         for line in f:
    ...         if 'test-label-2' in line:
    ...             print(line.split()[1:4])
@@ -1451,7 +1455,8 @@ sys.path[:] = %(path)r
 """

 def generate_script(name, src):
-    open(name, 'w').write(script_template % dict(
+    with open(name, 'w') as f:
+        f.write(script_template % dict(
         exe=sys.executable,
         path=sys.path,
         src=src,
@@ -1460,7 +1465,8 @@ def generate_script(name, src):
 def runzeo_logrotate_on_sigusr2():
     """
     >>> port = get_port()
-    >>> r = open('c', 'w').write('''
+    >>> with open('c', 'w') as r:
+    ...     r.write('''
     ... <zeo>
     ... address %s
     ... </zeo>
@@ -1478,19 +1484,23 @@ def runzeo_logrotate_on_sigusr2():
    ... ''')

    >>> import subprocess, signal
    >>> p = subprocess.Popen([sys.executable, 's', '-Cc'], close_fds=True)
-   >>> wait_until('started',
-   ...     lambda : os.path.exists('l') and ('listening on' in open('l').read())
+   >>> with open('l') as f:
+   ...     wait_until('started',
+   ...         lambda : os.path.exists('l') and ('listening on' in f.read())
    ... )

-   >>> oldlog = open('l').read()
+   >>> with open('l') as f:
+   ...     oldlog = f.read()
    >>> os.rename('l', 'o')
    >>> os.kill(p.pid, signal.SIGUSR2)

    >>> wait_until('new file', lambda : os.path.exists('l'))
    >>> s = ClientStorage(port)
    >>> s.close()
-   >>> wait_until('See logging', lambda : ('Log files ' in open('l').read()))
-   >>> open('o').read() == oldlog # No new data in old log
+   >>> with open('l') as f:
+   ...     wait_until('See logging', lambda : ('Log files ' in f.read()))
+   >>> with open('o') as f:
+   ...     f.read() == oldlog # No new data in old log
    True

    # Cleanup:
@@ -46,9 +46,8 @@ class TestZEOOptions(TestZDOptions):
     def setUp(self):
         self.tempfilename = tempfile.mktemp()
-        f = open(self.tempfilename, "w")
-        f.write(self.configdata)
-        f.close()
+        with open(self.tempfilename, "w") as f:
+            f.write(self.configdata)

     def tearDown(self):
         try:
@@ -159,11 +159,10 @@ class CacheTests(ZODB.tests.util.TestCase):
         path = tempfile.mktemp()
         # Copy data from self.cache into path, reaching into the cache
         # guts to make the copy.
-        dst = open(path, "wb+")
-        src = self.cache.f
-        src.seek(0)
-        dst.write(src.read(self.cache.maxsize))
-        dst.close()
+        with open(path, "wb+") as dst:
+            src = self.cache.f
+            src.seek(0)
+            dst.write(src.read(self.cache.maxsize))
         copy = ZEO.cache.ClientCache(path)

         # Verify that internals of both objects are the same.
@@ -213,24 +212,22 @@ class CacheTests(ZODB.tests.util.TestCase):
         cache.close()

     def testConversionOfLargeFreeBlocks(self):
-        f = open('cache', 'wb')
-        f.write(ZEO.cache.magic+
-                b'\0'*8 +
-                b'f'+struct.pack(">I", (1<<32)-12)
-                )
-        f.seek((1<<32)-1)
-        f.write(b'x')
-        f.close()
+        with open('cache', 'wb') as f:
+            f.write(ZEO.cache.magic+
+                    b'\0'*8 +
+                    b'f'+struct.pack(">I", (1<<32)-12)
+                    )
+            f.seek((1<<32)-1)
+            f.write(b'x')
         cache = ZEO.cache.ClientCache('cache', size=1<<32)
         cache.close()
         cache = ZEO.cache.ClientCache('cache', size=1<<32)
         cache.close()
-        f = open('cache', 'rb')
-        f.seek(12)
-        self.assertEquals(f.read(1), b'f')
-        self.assertEquals(struct.unpack(">I", f.read(4))[0],
-                          ZEO.cache.max_block_size)
-        f.close()
+        with open('cache', 'rb') as f:
+            f.seek(12)
+            self.assertEquals(f.read(1), b'f')
+            self.assertEquals(struct.unpack(">I", f.read(4))[0],
+                              ZEO.cache.max_block_size)

         if not sys.platform.startswith('linux'):
             # On platforms without sparse files, these tests are just way
@@ -347,8 +344,8 @@ isn't corrupted. To see this, we'll write a little script that
 writes records to a cache file repeatedly.

    >>> import os, random, sys, time
-   >>> with open('t', 'w') as file:
-   ...     _ = file.write('''
+   >>> with open('t', 'w') as f:
+   ...     _ = f.write('''
    ... import os, random, sys, time
    ... try:
    ...     import thread
@@ -1088,8 +1085,8 @@ def rename_bad_cache_file():
    """
 An attempt to open a bad cache file will cause it to be dropped and recreated.

-   >>> with open('cache', 'w') as file:
-   ...     _ = file.write('x'*100)
+   >>> with open('cache', 'w') as f:
+   ...     _ = f.write('x'*100)
    >>> import logging, sys
    >>> handler = logging.StreamHandler(sys.stdout)
    >>> logging.getLogger().addHandler(handler)
@@ -1104,14 +1101,13 @@ An attempt to open a bad cache file will cause it to be dropped and recreated.
    >>> cache.store(p64(1), p64(1), None, b'data')
    >>> cache.close()
-   >>> f = open('cache')
-   >>> _ = f.seek(0, 2)
-   >>> print(f.tell())
+   >>> with open('cache') as f:
+   ...     _ = f.seek(0, 2)
+   ...     print(f.tell())
    1000
-   >>> f.close()

-   >>> with open('cache', 'w') as file:
-   ...     _ = file.write('x'*200)
+   >>> with open('cache', 'w') as f:
+   ...     _ = f.write('x'*200)
    >>> cache = ZEO.cache.ClientCache('cache', 1000) # doctest: +ELLIPSIS
    Removing bad cache file: 'cache' (prev bad exists).
    Traceback (most recent call last):
@@ -1120,17 +1116,15 @@ An attempt to open a bad cache file will cause it to be dropped and recreated.
    >>> cache.store(p64(1), p64(1), None, b'data')
    >>> cache.close()
-   >>> f = open('cache')
-   >>> _ = f.seek(0, 2)
-   >>> print(f.tell())
+   >>> with open('cache') as f:
+   ...     _ = f.seek(0, 2)
+   ...     print(f.tell())
    1000
-   >>> f.close()

-   >>> f = open('cache.bad')
-   >>> _ = f.seek(0, 2)
-   >>> print(f.tell())
+   >>> with open('cache.bad') as f:
+   ...     _ = f.seek(0, 2)
+   ...     print(f.tell())
    100
-   >>> f.close()

    >>> logging.getLogger().removeHandler(handler)
    >>> logging.getLogger().setLevel(old_level)