Commit 09958e6e authored by Jason Madden's avatar Jason Madden

More test cases that need to close blobs to deterministically run under PyPy.

parent d1324825
......@@ -34,7 +34,7 @@ A current client should be able to connect to a old server:
2
>>> conn.root()['blob1'] = ZODB.blob.Blob()
>>> r = conn.root()['blob1'].open('w').write(b'blob data 1')
>>> with conn.root()['blob1'].open('w') as f: r = f.write(b'blob data 1')
>>> transaction.commit()
>>> db2 = ZEO.DB(addr, blob_dir='server-blobs', shared_blob_dir=True)
......@@ -44,7 +44,7 @@ A current client should be able to connect to a old server:
... conn2.root().x += 1
... transaction.commit()
>>> conn2.root()['blob2'] = ZODB.blob.Blob()
>>> r = conn2.root()['blob2'].open('w').write(b'blob data 2')
>>> with conn2.root()['blob2'].open('w') as f: r = f.write(b'blob data 2')
>>> transaction.commit()
>>> @wait_until("Get the new data")
......@@ -76,9 +76,9 @@ A current client should be able to connect to a old server:
>>> conn.root().x
17
>>> conn.root()['blob1'].open().read()
>>> with conn.root()['blob1'].open() as f: f.read()
b'blob data 1'
>>> conn.root()['blob2'].open().read()
>>> with conn.root()['blob2'].open() as f: f.read()
b'blob data 2'
Note that when talking to a 3.8 server, iteration won't work:
......@@ -118,7 +118,7 @@ Note that we'll have to pull some hijinks:
2
>>> conn.root()['blob1'] = ZODB.blob.Blob()
>>> r = conn.root()['blob1'].open('w').write(b'blob data 1')
>>> with conn.root()['blob1'].open('w') as f: r = f.write(b'blob data 1')
>>> transaction.commit()
>>> db2 = ZEO.DB(addr, blob_dir='server-blobs', shared_blob_dir=True)
......@@ -128,7 +128,7 @@ Note that we'll have to pull some hijinks:
... conn2.root().x += 1
... transaction.commit()
>>> conn2.root()['blob2'] = ZODB.blob.Blob()
>>> r = conn2.root()['blob2'].open('w').write(b'blob data 2')
>>> with conn2.root()['blob2'].open('w') as f: r = f.write(b'blob data 2')
>>> transaction.commit()
......@@ -161,9 +161,9 @@ Note that we'll have to pull some hijinks:
>>> conn.root().x
17
>>> conn.root()['blob1'].open().read()
>>> with conn.root()['blob1'].open() as f: f.read()
b'blob data 1'
>>> conn.root()['blob2'].open().read()
>>> with conn.root()['blob2'].open() as f: f.read()
b'blob data 2'
Make some old protocol calls:
......
......@@ -52,7 +52,7 @@ Now, let's write some data:
>>> conn = db.open()
>>> for i in range(1, 101):
... conn.root()[i] = ZODB.blob.Blob()
... w = conn.root()[i].open('w').write((chr(i)*100).encode('ascii'))
... with conn.root()[i].open('w') as f: w = f.write((chr(i)*100).encode('ascii'))
>>> transaction.commit()
We've committed 10000 bytes of data, but our target size is 3000. We
......@@ -85,19 +85,19 @@ necessary, but the cache size will remain not much bigger than the
target:
>>> for i in range(1, 101):
... data = conn.root()[i].open().read()
... with conn.root()[i].open() as f: data = f.read()
... if data != (chr(i)*100).encode('ascii'):
... print('bad data', repr(chr(i)), repr(data))
>>> wait_until("size is reduced", check, 99, onfail)
>>> for i in range(1, 101):
... data = conn.root()[i].open().read()
... with conn.root()[i].open() as f: data = f.read()
... if data != (chr(i)*100).encode('ascii'):
... print('bad data', repr(chr(i)), repr(data))
>>> for i in range(1, 101):
... data = conn.root()[i].open('c').read()
... with conn.root()[i].open('c') as f: data = f.read()
... if data != (chr(i)*100).encode('ascii'):
... print('bad data', repr(chr(i)), repr(data))
......@@ -114,11 +114,11 @@ provoke problems:
... for i in range(300):
... time.sleep(0)
... i = random.randint(1, 100)
... data = conn.root()[i].open().read()
... with conn.root()[i].open() as f: data = f.read()
... if data != (chr(i)*100).encode('ascii'):
... print('bad data', repr(chr(i)), repr(data))
... i = random.randint(1, 100)
... data = conn.root()[i].open('c').read()
... with conn.root()[i].open('c') as f: data = f.read()
... if data != (chr(i)*100).encode('ascii'):
... print('bad data', repr(chr(i)), repr(data))
... db.close()
......@@ -143,4 +143,3 @@ provoke problems:
>>> db.close()
>>> ZEO.ClientStorage.BlobCacheLayout.size = orig_blob_cache_layout_size
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment