Commit ef6d0669 authored by Kirill Smelkov

.

parent e76f9f9a
@@ -17,7 +17,7 @@
 #
 # See COPYING file for full licensing terms.
 # See https://www.nexedi.com/licensing for rationale and options.
-"""test wcfs filesystem from outside as python client process"""
+"""wcfs_test tests wcfs filesystem from outside as python client process"""
 
 from __future__ import print_function
@@ -116,7 +116,11 @@ def test_join_autostart():
     assert os.path.isdir(wc.mountpoint + "/head/bigfile")
 
-# tDB is database/wcfs testing environment.
+# tDB provides database/wcfs testing environment.
+#
+# XXX link -> tFile + tWatch.
+#
+# XXX print -> t.trace/debug() + t.verbose depending on py.test -v -v ?
 class tDB:
     def __init__(t):
         t.root = testdb.dbopen()
@@ -126,6 +130,7 @@ class tDB:
         t._changed = {} # ZBigFile -> {} blk -> data
 
         # committed: head + head history
+        # XXX -> vδF (committed changes to files)
         t.head = None
         t._headv = []
@@ -157,7 +162,9 @@ class tDB:
         assert len(data) <= zf.blksize
         zfDelta[blk] = data
 
-    # commit commits transaction and remembers/returns committed transaction ID.
+    # commit commits transaction and makes sure wcfs is synchronized to it.
+    #
+    # It remembers/returns committed transaction ID.
     def commit(t):
         # perform modifications scheduled by change.
         # use !wcfs mode so that we prepare data independently of wcfs code paths.
@@ -171,7 +178,7 @@ class tDB:
         t._changed = {}
 
         # NOTE there is no clean way to retrieve tid of just committed transaction
-        # we are using last._p_serial as workaround.
+        # we use last._p_serial as workaround.
         t.root['_last'] = last = Persistent()
         last._p_changed = 1
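The last._p_serial workaround in the hunk above relies on ZODB setting _p_serial of every object written by a transaction to that transaction's id once the commit completes. A minimal standalone sketch of the same trick (the helper name is mine, not the test's):

import transaction
from persistent import Persistent

def commit_and_get_tid(root):
    # write a throwaway object so the transaction is guaranteed to be non-empty
    root['_last'] = last = Persistent()
    last._p_changed = 1
    transaction.commit()
    # after commit, _p_serial of the written object equals the tid of that commit
    return last._p_serial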
@@ -181,10 +188,14 @@ class tDB:
         t.ncommit += 1
         t.head = head
         t._headv.append(head)
+
+        # sync wcfs
+        t._wcsync()
+
         return head
 
-    # wcsync makes sure wcfs synchronized to latest committed transaction.
-    def wcsync(t):
+    # _wcsync makes sure wcfs is synchronized to latest committed transaction.
+    def _wcsync(t):
         while len(t._wc_zheadv) < len(t._headv):
             l = t._wc_zheadfh.readline()
             #print('> zhead read: %r' % l)
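_wcsync above waits for wcfs by reading lines from a wcfs-provided stream (t._wc_zheadfh) that reports one entry per ZODB transaction wcfs has processed, until it has seen as many entries as transactions committed through tDB. A hedged sketch of that loop in isolation; the stream path and line format are wcfs internals and only assumed here:

def wait_wcfs_synced(zheadfh, n_committed, seenv):
    # block until wcfs has reported every transaction we committed ourselves
    while len(seenv) < n_committed:
        l = zheadfh.readline()          # one line per transaction wcfs processed
        assert l != '', "wcfs closed the zhead stream"
        seenv.append(l.rstrip('\n'))    # entry is assumed to be the new head tid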
...@@ -239,7 +250,7 @@ class tDB: ...@@ -239,7 +250,7 @@ class tDB:
return tWatch(t) return tWatch(t)
# tFile is testing environment for one bigfile on wcfs. # tFile provides testing environment for one bigfile on wcfs.
class tFile: class tFile:
# maximum number of pages we mmap for 1 file. # maximum number of pages we mmap for 1 file.
# this should be not big not to exceed mlock limit. # this should be not big not to exceed mlock limit.
@@ -251,10 +262,10 @@ class tFile:
         t.f = tdb._open(zf, at=at)
         t.blksize = zf.blksize
 
-        # mmap the file past the end up to XXX pages and lock the pages with
-        # MLOCK_ONFAULT. This way when a page is read by mmap access we have
-        # the guarantee from kernel that the page will stay in pagecache. We
-        # rely on this to verify OS cache state.
+        # mmap the file past the end up to _max_tracked pages and lock the
+        # pages with MLOCK_ONFAULT. This way when a page is read by mmap access
+        # we have the guarantee from kernel that the page will stay in
+        # pagecache. We rely on this to verify OS cache state.
         assert t.blksize % mm.PAGE_SIZE == 0
         t.fmmap = mm.map_ro(t.f.fileno(), 0, t._max_tracked*t.blksize)
         mm.lock(t.fmmap, mm.MLOCK_ONFAULT)
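The MLOCK_ONFAULT idea in the hunk above: pages of the read-only mapping are locked into RAM only once first touched, so "this page was read through the mmap" becomes observable as "this page is resident", which the test later uses to check OS pagecache state. A sketch using the same mm helpers that appear in this hunk; the import path is an assumption:

from wendelin.wcfs.internal import mm   # import path assumed for illustration

def map_and_pin(f, blksize, nblk):
    # map nblk blocks read-only, possibly past current EOF
    fmmap = mm.map_ro(f.fileno(), 0, nblk*blksize)
    # lock-on-fault: a page becomes resident (and stays resident) only after it
    # is actually accessed through this mapping
    mm.lock(fmmap, mm.MLOCK_ONFAULT)
    return fmmap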
@@ -302,7 +313,7 @@ class tFile:
         assert t.cached() == incorev
 
-    # blk returns bytearray connected to view of file[blk].
+    # blk returns bytearray view of file[blk].
     def blk(t, blk):
         assert blk <= t._max_tracked
         return bytearray(t.fmmap[blk*t.blksize:(blk+1)*t.blksize])
@@ -348,9 +359,7 @@ class tFile:
     # Expected blocks may be given with size < zf.blksize. In such case they
     # are implicitly appended with trailing zeros.
     #
-    # It also check file size and optionally mtime.
-    #
-    # XXX also check pagecache state?
+    # It also checks file size and optionally mtime.
     def assertData(t, datav, mtime=None):
         st = os.fstat(t.f.fileno())
         assert st.st_size == len(datav)*t.blksize
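The zero-padding convention assertData documents above (an expected block shorter than zf.blksize is compared as if extended with trailing zeros to the full block size) amounts to this, with a hypothetical helper name:

def pad_block(data, blksize):
    # expected block data may be given shorter than blksize
    assert len(data) <= blksize
    return data + b'\0'*(blksize - len(data))

assert pad_block(b'alpha', 8) == b'alpha\0\0\0'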
@@ -364,7 +373,7 @@ class tFile:
         t.assertCache([1]*len(datav))
 
-# tWatch is testing environment for /head/watch opened on wcfs.
+# tWatch provides testing environment for /head/watch opened on wcfs.
 class tWatch:
     def __init__(t, tdb):
@@ -540,7 +549,6 @@ def test_wcfs():
     t.ncommit = 0   # so that atX in the code correspond with debug output
     at0_ = t.commit()
     assert tidtime(at0_) > tidtime(at0)
-    t.wcsync()
 
     # >>> lookup non-BigFile -> must be rejected
     with raises(OSError) as exc:
@@ -556,7 +564,6 @@ def test_wcfs():
     t.change(zf, {2: b'alpha'})
     at1 = t.commit()
-    t.wcsync()
 
     f.assertCache([0,0,0])  # initially not cached
     f.assertData ([b'',b'',b'alpha'], mtime=t.head)
@@ -564,8 +571,6 @@ def test_wcfs():
     t.change(zf, {2: b'beta', 3: b'gamma'})
     at2 = t.commit()
-    t.wcsync()
 
     # f @head
     f.assertCache([1,1,0,0])
     f.assertData ([b'',b'', b'beta', b'gamma'], mtime=t.head)
@@ -581,7 +586,6 @@ def test_wcfs():
     t.change(zf, {2: b'kitty'})
     at3 = t.commit()
-    t.wcsync()
 
     f.assertCache([1,1,0,1])
     # f @head is opened again -> cache must not be lost
...