Commit c5e18c74 authored by Kirill Smelkov

bigfile/zodb: Teach ZBigFile backend to use WCFS

By using WCFS as mmap-overlay for base data(*). WCFS mode is still opt-in,
with the default remaining the old full user-space virtual memory manager
mode as initially introduced in 2015.

Wendelin.core should now be usable in WCFS mode, albeit still in draft form.
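
For reference, a rough usage sketch of how a client opts in to WCFS mode
(not part of this patch; 'root' and the blksize value are illustrative,
the rest follows $WENDELIN_CORE_VIRTMEM and ZBigFile.fileh_open as changed
below):

    import os, transaction
    # select shared-cache (wcfs) mode; unset or 'rw:uvmm' keeps local-cache (!wcfs)
    os.environ['WENDELIN_CORE_VIRTMEM'] = 'r:wcfs+w:uvmm'

    from wendelin.bigfile.file_zodb import ZBigFile

    root['zfile'] = zf = ZBigFile(blksize=2*1024*1024)  # 'root' is an already-open ZODB root
    transaction.commit()    # ZBigFile must be committed before WCFS can open it by oid

    fh  = zf.fileh_open()                   # honours $WENDELIN_CORE_VIRTMEM
    fh2 = zf.fileh_open(_use_wcfs=True)     # or force wcfs for this particular handle
    # the WCFS server for the database is joined (and possibly auto-started) as needed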

This patch is organized as follows:

- file_zodb.cpp provides mmap-overlay operations for WCFS implemented via
  WCFS client library.
- file_zodb.py is adjusted accordingly to use WCFS if requested.
  Low-level things specific to gluing to file_zodb.cpp are moved to _file_zodb.pyx.
- the rest of the changes are drive-by adjustments that follow from the main ones.

(*) see the following patches for what mmap-overlay is:

- fae045cc  (bigfile/virtmem: Introduce "mmap overlay" mode)
- 23362204  (bigfile/py: Allow PyBigFile backend to expose "mmap overlay" functionality)

Some preliminary history:

kirr/wendelin.core@01916f09    X Draft demo that reading data through wcfs works
kirr/wendelin.core@fd58082a    X Fix build on old GCC
kirr/wendelin.core@f622e751    X tests: Stop wcfs spawned during tests
kirr/wendelin.core@f118617b    X tests: Don't try to stop wcfs that is already exited
parent 986cf86e
@@ -26,10 +26,10 @@ for kind in ['t', 'fault', 'asan', 'tsan']:
     TestCase(t, ['make', t], envadj=envadj)    # TODO summaryf=TAP.summary

-# test.py/<stor> runs unit- and functional- tests for wendelin.core .
+# test.py/<stor>-!wcfs runs unit- and functional- tests for wendelin.core in non-wcfs mode.
 for stor in storv:
     TestCase('test.py/%s-!wcfs' % stor, ['make', 'test.py'],
-             envadj={'WENDELIN_CORE_TEST_DB': '<%s>' % stor},
+             envadj={'WENDELIN_CORE_TEST_DB': '<%s>' % stor, 'WENDELIN_CORE_VIRTMEM': 'rw:uvmm'},
              summaryf=PyTest.summary)
@@ -39,9 +39,15 @@ for nproc in gonprocv:
         envadj={'GOMAXPROCS': nproc})

 # test.wcfs/<stor> runs unit tests for WCFS
+# test.py/<stor>-wcfs runs unit- and functional- tests for wendelin.core in wcfs mode.
 for stor in storv:
     envdb = {'WENDELIN_CORE_TEST_DB': '<%s>' % stor}
     for nproc in gonprocv:
         TestCase('test.wcfs/%s:%s' % (stor, nproc), ['make', 'test.wcfs'],
                  envadj=dict(GOMAXPROCS=nproc, **envdb), summaryf=PyTest.summary)
+    for nproc in gonprocv:
+        TestCase('test.py/%s-wcfs:%s' % (stor, nproc), ['make', 'test.py'],
+                 envadj=dict(WENDELIN_CORE_VIRTMEM='r:wcfs+w:uvmm', GOMAXPROCS=nproc, **envdb),
+                 summaryf=PyTest.summary)
/_file_zodb.cpp
/_file_zodb.h
 # -*- coding: utf-8 -*-
 # BigFile submodule for Wendelin
-# Copyright (C) 2014-2015 Nexedi SA and Contributors.
+# Copyright (C) 2014-2021 Nexedi SA and Contributors.
 # Kirill Smelkov <kirr@nexedi.com>
 #
 # This program is free software: you can Use, Study, Modify and Redistribute
@@ -21,4 +21,11 @@
 """TODO big module-level picture description"""

+# preload golang.so -> libgolang.so. This way dynamic linker discovers where
+# libgolang.so is, and so there will be no link failure due to libgolang.so not
+# found, when our C++ libraries, that use libgolang.so, are loaded (e.g. libwcfs.so).
+#
+# https://github.com/mdavidsaver/setuptools_dso/issues/11#issuecomment-808258994
+import golang
+
 from ._bigfile import BigFile, WRITEOUT_STORE, WRITEOUT_MARKSTORED, ram_reclaim
@@ -110,7 +110,7 @@ typedef struct PyBigFileH PyBigFileH;
  * mmapping data. To avoid deadlocks all mmap-related functionality must be
  * nogil and so cannot be implemented in Python.
  *
- * The primary user of .blkmmapper functionality will be _ZBigFile which uses WCFS
+ * The primary user of .blkmmapper functionality is _ZBigFile which uses WCFS
  * and mmaps files from it to provide memory mappings for ZBigFile data.
  */
 struct PyBigFile {
...
# -*- coding: utf-8 -*-
# Wendelin.bigfile | WCFS part of BigFile ZODB backend
# Copyright (C) 2014-2021 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
# it under the terms of the GNU General Public License version 3, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# You can also Link and Combine this program with other software covered by
# the terms of any of the Free Software licenses or any of the Open Source
# Initiative approved licenses and Convey the resulting work. Corresponding
# source of such a combination shall include the source code for all other
# software used.
#
# This program is distributed WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
# cython: language_level=2
# distutils: language=c++
"""Module _file_zodb.pyx complements file_zodb.py with things that cannot be
implemented in Python.
It provides wcfs integration for ZBigFile handles opened with _use_wcfs=True.
"""
from __future__ import print_function, absolute_import
cdef extern from "wcfs/client/wcfs.h":
    pass

cdef extern from "bigfile/_bigfile.h":
    struct PyBigFile:
        pass
    ctypedef extern class wendelin.bigfile._bigfile.BigFile[object PyBigFile]:
        pass

# ZBigFile_mmap_ops is virtmem mmap functions for _ZBigFile.
cdef extern from "<wendelin/bigfile/file.h>" nogil:
    struct bigfile_ops:
        pass
cdef extern from * nogil:
    """
    extern const bigfile_ops ZBigFile_mmap_ops;
    """
    const bigfile_ops ZBigFile_mmap_ops
from wendelin.wcfs.client cimport _wcfs as wcfs, _wczsync as wczsync
from golang cimport error, nil, pyerror
from cpython cimport PyCapsule_New
# _ZBigFile is base class for ZBigFile that provides BigFile-line base.
#
# The other base line is from Persistent. It is not possible to inherit from
# both Persistent and BigFile at the same time since both are C types and their
# layouts conflict.
#
# _ZBigFile:
#
# - redirects loadblk/storeblk calls to ZBigFile.
# - provides blkmmapper with WCFS integration.
cdef public class _ZBigFile(BigFile) [object _ZBigFile, type _ZBigFile_Type]:
    cdef object     zself   # reference to ZBigFile
    cdef wcfs.FileH wfileh  # WCFS file handle. Initially nil, opened by blkmmapper

    # _new creates new _ZBigFile associated with ZBigFile zself.
    # XXX Cython does not allow __new__ nor to change arguments passed to __cinit__ / __init__
    @staticmethod
    def _new(zself, blksize):
        cdef _ZBigFile obj = _ZBigFile.__new__(_ZBigFile, blksize)
        obj.zself  = zself
        obj.wfileh = nil
        return obj

    def __dealloc__(_ZBigFile zf):
        cdef error err = nil
        if zf.wfileh != nil:
            err = zf.wfileh.close()
        zf.wfileh = nil
        if err != nil:
            raise pyerror.from_error(err)

    # redirect load/store to main class
    def loadblk(self, blk, buf):    return self.zself.loadblk(blk, buf)
    def storeblk(self, blk, buf):   return self.zself.storeblk(blk, buf)

    # blkmmapper complements loadblk/storeblk and is pycapsule with virtmem mmap
    # functions for _ZBigFile. MMap functions rely on .wfileh being initialized
    # by .fileh_open()
    blkmmapper = PyCapsule_New(<void*>&ZBigFile_mmap_ops, "wendelin.bigfile.IBlkMMapper", NULL)

    # fileh_open wraps BigFile.fileh_open and makes sure that WCFS file handle
    # corresponding to ZBigFile is opened if use_wcfs=True.
    def fileh_open(_ZBigFile zf, bint use_wcfs):
        mmap_overlay = False
        cdef wcfs.PyFileH pywfileh

        if use_wcfs:
            mmap_overlay = True
            if zf.wfileh == nil:
                zconn = zf.zself._p_jar
                assert zconn is not None

                # join zconn to wconn; link to wconn from _ZBigFile
                pywconn   = wczsync.pywconnOf(zconn)
                pywfileh  = pywconn.open(zf.zself._p_oid)
                zf.wfileh = pywfileh.wfileh

        return super(_ZBigFile, zf).fileh_open(mmap_overlay)
// Copyright (C) 2019-2021 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
// File file_zodb.cpp provides blkmmapper functions for _ZBigFile.
// MMapping is implemented via wcfs client.
#include "wcfs/client/wcfs.h"
#include "wendelin/bigfile/file.h"
#include "wendelin/bigfile/virtmem.h"
#include "bigfile/_bigfile.h"
#include "bigfile/_file_zodb.h"
#include <ccan/container_of/container_of.h>
static int zfile_mmap_setup_read(VMA *vma, BigFile *file, blk_t blk, size_t blklen) {
_ZBigFile* _zfile = container_of(file, _ZBigFile, __pyx_base.file);
wcfs::FileH fileh = _zfile->wfileh;
wcfs::Mapping mmap;
error err;
if (fileh == nil)
panic("BUG: zfile_mmap_setup_read: ZBigFile.fileh_open did not set .wfileh");
tie(mmap, err) = fileh->mmap(blk, blklen, vma);
if (err != nil) {
log::Errorf("%s", v(err)); // XXX no way to return error details to virtmem
return -1;
}
return 0;
}
static int zfile_remmap_blk_read(VMA *vma, BigFile *file, blk_t blk) {
wcfs::_Mapping* mmap = static_cast<wcfs::_Mapping*>(vma->mmap_overlay_server);
_ZBigFile* _zfile = container_of(file, _ZBigFile, __pyx_base.file);
if (mmap->fileh != _zfile->wfileh)
panic("BUG: zfile_remmap_blk_read: vma and _zfile point to different wcfs::FileH");
error err;
err = mmap->remmap_blk(blk);
if (err != nil) {
log::Errorf("%s", v(err)); // XXX no way to return error details to virtmem
return -1;
}
return 0;
}
static int zfile_munmap(VMA *vma, BigFile *file) {
wcfs::_Mapping* mmap = static_cast<wcfs::_Mapping*>(vma->mmap_overlay_server);
_ZBigFile* _zfile = container_of(file, _ZBigFile, __pyx_base.file);
if (mmap->fileh != _zfile->wfileh)
panic("BUG: zfile_munmap: vma and _zfile point to different wcfs::FileH");
error err;
err = mmap->unmap();
if (err != nil) {
log::Errorf("%s", v(err)); // XXX no way to return error details to virtmem
return -1;
}
return 0;
}
// NOTE reusing whole bigfile_ops for just .mmap* ops.
extern const bigfile_ops ZBigFile_mmap_ops;
static bigfile_ops _mkZBigFile_mmap_ops() {
// workaround for "sorry, unimplemented: non-trivial designated initializers not supported"
bigfile_ops _;
_.mmap_setup_read = zfile_mmap_setup_read;
_.remmap_blk_read = zfile_remmap_blk_read;
_.munmap = zfile_munmap;
_.loadblk = NULL;
_.storeblk = NULL;
return _;
};
const bigfile_ops ZBigFile_mmap_ops = _mkZBigFile_mmap_ops();
 # -*- coding: utf-8 -*-
 # Wendelin.bigfile | BigFile ZODB backend
-# Copyright (C) 2014-2020 Nexedi SA and Contributors.
+# Copyright (C) 2014-2021 Nexedi SA and Contributors.
 # Kirill Smelkov <kirr@nexedi.com>
 #
 # This program is free software: you can Use, Study, Modify and Redistribute
@@ -50,6 +50,30 @@ The primary user of ZBigFile is ZBigArray (see bigarray/__init__.py and
 bigarray/array_zodb.py), but ZBigFile itself can be used directly too.
Operating mode
--------------
Two operating modes are provided: "local-cache" and "shared-cache".
Local-cache is the mode wendelin.core was originally implemented with in 2015.
In this mode ZBigFile data is loaded from ZODB directly via current ZODB connection.
It was relatively straightforward to implement, but cached file data becomes
duplicated between the ZODB connections of the current process and between
different client processes that use ZODB.

In shared-cache mode the file's data is accessed through a special filesystem
whose data cache is centrally maintained by the OS kernel. This mode was added
in 2021 and reduces wendelin.core RAM consumption dramatically. Note that even
though the cache is shared, the isolation property is still fully provided.
Please see wcfs/wcfs.go, which describes the filesystem and shared-cache mode
in detail.
The mode of operation can be selected via environment variable::

    $WENDELIN_CORE_VIRTMEM
        rw:uvmm          local-cache   (i.e. !wcfs)  (default)
        r:wcfs+w:uvmm    shared-cache  (i.e. wcfs)
 Data format
 -----------
@@ -88,7 +112,7 @@ For "2" we have
 - low-overhead in terms of ZODB size (only part of a block is overwritten
   in DB on single change), but
 - high-overhead in terms of access time
-  (several objects need to be loaded for 1 block)
+  (several objects need to be loaded for 1 block(*))
 In general it is not possible to have low-overhead for both i) access-time, and
 ii) DB size, with approach where we do block objects representation /
@@ -98,6 +122,10 @@ On the other hand, if object management is moved to DB *server* side, it is
 possible to deduplicate them there and this way have low-overhead for both
 access-time and DB size with just client storing 1 object per file block. This
 will be our future approach after we teach NEO about object deduplication.
+
+(*) wcfs loads ZBlk1 subobjects in parallel, but today ZODB storage servers do
+not scale well on such highly-concurrent access.
 """
 # ZBigFile organization
@@ -125,13 +153,12 @@ will be our future approach after we teach NEO about object deduplication.
 # between virtmem subsystem and ZODB, and virtmem->ZODB propagation happens only
 # at commit time.
 #
-# Since, for performance reasons, virtmem subsystem is going away and BigFiles
-# will be represented by real FUSE-based filesystem with virtual memory being
-# done by kernel, where we cannot get callback on a page-dirtying, it is more
-# natural to also use "2" here.
+# ZBigFile follows second scheme and synchronizes dirty RAM with ZODB at commit time.
+# See _ZBigFileH for details.

-from wendelin.bigfile import BigFile, WRITEOUT_STORE, WRITEOUT_MARKSTORED
+from wendelin.bigfile import WRITEOUT_STORE, WRITEOUT_MARKSTORED
+from wendelin.bigfile._file_zodb import _ZBigFile
 from wendelin.lib.mem import bzero, memcpy
 from wendelin.lib.zodb import LivePersistent, deactivate_btree
@@ -464,23 +491,6 @@ if ZBlk_fmt_write not in ZBlk_fmt_registry:
 # ----------------------------------------
-
-# helper for ZBigFile - just redirect loadblk/storeblk back
-# (because it is not possible to inherit from both Persistent and BigFile at
-# the same time - see below)
-class _ZBigFile(BigFile):
-    # .zself - reference to ZBigFile
-    def __new__(cls, zself, blksize):
-        obj = BigFile.__new__(cls, blksize)
-        obj.zself = zself
-        return obj
-
-    # redirect load/store to main class
-    def loadblk(self, blk, buf):    return self.zself.loadblk(blk, buf)
-    def storeblk(self, blk, buf):   return self.zself.storeblk(blk, buf)
-
 # ZBigFile implements BigFile backend with data stored in ZODB.
 #
 # NOTE Can't inherit from Persistent and BigFile at the same time - both are C
@@ -510,7 +520,7 @@ class ZBigFile(LivePersistent):
     def __setstate__(self, state):
         self.blksize, self.blktab = state
-        self._v_file = _ZBigFile(self, self.blksize)
+        self._v_file = _ZBigFile._new(self, self.blksize)
         self._v_filehset = WeakSet()
@@ -560,17 +570,48 @@ class ZBigFile(LivePersistent):
     # invalidate data   .blktab[blk] invalidated -> invalidate page
     def invalidateblk(self, blk):
         for fileh in self._v_filehset:
+            # wcfs: there is no need to propagate ZODB -> fileh invalidation by
+            # client since WCFS handles invalidations from ZODB by itself.
+            #
+            # More: the algorithm to compute δ(ZODB) -> δ(blk) is more complex
+            # than 1-1 ZBlk <-> blk mapping: ZBlk could stay constant, but if
+            # ZBigFile.blktab topology is changed, affected file blocks have to
+            # be invalidated. Currently the !wcfs codepath fails to handle that,
+            # while wcfs handles invalidations correctly. The plan is to make
+            # the wcfs way the primary one and to deprecate !wcfs.
+            #
+            # -> don't propagate ZODB -> WCFS invalidation by client to fully
+            # rely on and test the wcfs subsystem.
+            if fileh.uses_mmap_overlay():
+                continue
+
             fileh.invalidate_page(blk)  # XXX assumes blksize == pagesize
     # fileh_open is bigfile-like method that creates new file-handle object
     # that is given to user for mmap.
-    def fileh_open(self):
-        fileh = _ZBigFileH(self)
+    #
+    # _use_wcfs is internal option and controls whether to use wcfs to access
+    # ZBigFile data:
+    #
+    #   - True     -> use wcfs
+    #   - False    -> don't use wcfs
+    #   - not set  -> behave according to global default
+    def fileh_open(self, _use_wcfs=None):
+        if _use_wcfs is None:
+            _use_wcfs = self._default_use_wcfs()
+
+        fileh = _ZBigFileH(self, _use_wcfs)
         self._v_filehset.add(fileh)
         return fileh
+
+    # _default_use_wcfs returns whether default virtmem setting is to use wcfs or not.
+    @staticmethod
+    def _default_use_wcfs():
+        virtmem = os.environ.get("WENDELIN_CORE_VIRTMEM", "rw:uvmm")    # unset -> !wcfs
+        virtmem = virtmem.lower()
+        return {"r:wcfs+w:uvmm": True, "rw:uvmm": False}[virtmem]
 # BigFileH wrapper that also acts as DataManager proxying changes ZODB <- virtmem
@@ -609,15 +650,18 @@ class ZBigFile(LivePersistent):
 # NOTE Bear in mind that after close, connection can be reopened in different
 # thread - that's why we have to adjust registration to per-thread
 # transaction_manager.
+#
+# See also _file_zodb.pyx -> ZSync which maintains and keeps zodb.Connection
+# and wcfs.Connection in sync.
 @implementer(IDataManager)
 @implementer(ISynchronizer)
 class _ZBigFileH(object):
     # .zfile   ZBigFile we were opened for
     # .zfileh  handle for ZBigFile in virtmem

-    def __init__(self, zfile):
+    def __init__(self, zfile, use_wcfs):
         self.zfile  = zfile
-        self.zfileh = zfile._v_file.fileh_open()
+        self.zfileh = zfile._v_file.fileh_open(use_wcfs)

         # FIXME zfile._p_jar could be None (ex. ZBigFile is newly created
         # before first commit)
@@ -679,6 +723,9 @@ class _ZBigFileH(object):
     def invalidate_page(self, pgoffset):
         return self.zfileh.invalidate_page(pgoffset)

+    def uses_mmap_overlay(self):
+        return self.zfileh.uses_mmap_overlay()
+
     # ~~~~ ISynchronizer ~~~~
     def beforeCompletion(self, txn):
...
@@ -75,6 +75,8 @@ def Blk(vma, i):
     return ndarray(blksize32, offset=i*blksize, buffer=vma, dtype=uint32)

 def test_bigfile_filezodb():
+    ram_reclaim_all()   # reclaim pages allocated by previous tests
+
     root = dbopen()
     root['zfile'] = f = ZBigFile(blksize)
     transaction.commit()
@@ -137,6 +139,10 @@ def test_bigfile_filezodb():
     # evict all loaded pages and test loading them again
     # (verifies ZBlk.loadblkdata() & loadblk logic when loading data the second time)
     reclaimed = ram_reclaim_all()
+    if fh.uses_mmap_overlay():
+        # in mmap-overlay mode no on-client RAM is allocated for read data
+        assert reclaimed == 0
+    else:
-    assert reclaimed >= blen    # XXX assumes pagesize=blksize
+        assert reclaimed >= blen    # XXX assumes pagesize=blksize
     for i in xrange(blen):
@@ -500,7 +506,7 @@ def _test_bigfile_filezodb_vs_cache_invalidation(_drop_cache):
 def test_bigfile_filezodb_vs_cache_invalidation():
     _test_bigfile_filezodb_vs_cache_invalidation(_drop_cache=lambda conn: None)

-@xfail
+@xfail  # NOTE passes with wcfs
 def test_bigfile_filezodb_vs_cache_invalidation_with_cache_pressure():
     _test_bigfile_filezodb_vs_cache_invalidation(_drop_cache=lambda conn: conn._cache.minimize())
...
@@ -22,6 +22,9 @@ from __future__ import print_function, absolute_import

 import pytest
 import transaction
+from golang import func, defer
+from functools import partial
+import gc

 # reset transaction synchronizers before every test run.
 #
@@ -58,3 +61,27 @@ def pytest_configure(config):
     # XXX + $WENDELIN_CORE_WCFS_OPTIONS="-d -alsologtostderr -v=1" ?
     if config.option.verbose > 1:
         config.inicfg['log_cli_level'] = "INFO"
# Before pytest exits, teardown WCFS server(s) that we automatically spawned
# during test runs in bigfile/bigarray/...
#
# If we do not do this, spawned wcfs servers are left running _and_ connected
# by stdout to nxdtest input - which makes nxdtest wait for them to exit.
@func
def pytest_unconfigure(config):
    # force collection of ZODB Connection(s) that were sitting in DB.pool(s)
    # (DB should be closed)
    gc.collect()

    from wendelin import wcfs
    for wc in wcfs._wcautostarted:
        # NOTE: defer instead of direct call - to call wc.close for every
        # spawned wc, and to proceed till the end even if any particular call
        # raises an exception.
        defer(partial(_wcclose_and_stop, wc))

@func
def _wcclose_and_stop(wc):
    defer(wc._wcsrv.stop)
    defer(wc.close)
@@ -332,6 +332,16 @@ setup(
                 language = 'c',
                 dsos = ['wendelin.bigfile.libvirtmem']),

+        PyGoExt('wendelin.bigfile._file_zodb',
+                ['bigfile/_file_zodb.pyx',
+                 'bigfile/file_zodb.cpp'],
+                depends = [
+                    'wcfs/client/_wcfs.pxd',
+                    'wcfs/client/_wczsync.pxd',
+                    'bigfile/_bigfile.h',
+                ] + libwcfs_h + libvirtmem_h,
+                dsos = ['wendelin.wcfs.client.libwcfs']),
+
         PyGoExt('wendelin.wcfs.client._wcfs',
                 ['wcfs/client/_wcfs.pyx'],
                 depends = libwcfs_h + libvirtmem_h,
...
 # wendelin.core | tox setup
 [tox]
-envlist = py27-{ZODB4,ZODB5}-{zblk0,zblk1}-{fs,zeo,neo}-{numpy115,numpy116},
-          {py36,py37}-{ZODB4,ZODB5}-{zblk0,zblk1}-fs-{numpy115,numpy116},
-          py36-{ZODB4,ZODB5}-{zblk0,zblk1}-zeo-{numpy115,numpy116},
-          py37-ZODB5-{zblk0,zblk1}-zeo-{numpy115,numpy116}
+envlist = py27-{ZODB4,ZODB5}-{zblk0,zblk1}-{fs,zeo,neo}-{numpy115,numpy116}-{!wcfs,wcfs,wcfs:1,wcfs:2},
+          {py36,py37}-{ZODB4,ZODB5}-{zblk0,zblk1}-fs-{numpy115,numpy116}-{!wcfs,wcfs,wcfs:1,wcfs:2},
+          py36-{ZODB4,ZODB5}-{zblk0,zblk1}-zeo-{numpy115,numpy116}-{!wcfs,wcfs,wcfs:1,wcfs:2},
+          py37-ZODB5-{zblk0,zblk1}-zeo-{numpy115,numpy116-{!wcfs,wcfs,wcfs:1,wcfs:2}}

 # (NOTE ZEO4 does not work with python3.7)
 # (NOTE NEO does not work on python3 at all)
 # (XXX ZODB5-*-neo are currently failing)
@@ -41,6 +41,17 @@ setenv =
     zblk0: WENDELIN_CORE_ZBLK_FMT=ZBlk0
     zblk1: WENDELIN_CORE_ZBLK_FMT=ZBlk1

+    !wcfs:  WENDELIN_CORE_VIRTMEM=rw:uvmm
+    wcfs:   WENDELIN_CORE_VIRTMEM=r:wcfs+w:uvmm
+
+    # some bugs are only likely to trigger when there is only 1 main OS thread in wcfs
+    wcfs:1: WENDELIN_CORE_VIRTMEM=r:wcfs+w:uvmm
+    wcfs:1: GOMAXPROCS=1
+
+    # ----//---- 2 main OS threads in wcfs
+    wcfs:2: WENDELIN_CORE_VIRTMEM=r:wcfs+w:uvmm
+    wcfs:2: GOMAXPROCS=2
+
 commands= {envpython} setup.py test
 # XXX setenv = TMPDIR = ... ?   (so that /tmp is not on tmpfs and we don't run out of memory on bench)
 # + {envpython} setup.py bench  (?)
@@ -159,6 +159,7 @@ def _open(wc, obj, mode='rb', at=None):

 _wcmu = sync.Mutex()
 _wcregistry = {}    # mntpt -> WCFS
+_wcautostarted = [] # of WCFS, with ._wcsrv != None, for wcfs we ever autostart'ed (for tests)

 @func(WCFS)
 def __init__(wc, mountpoint, fwcfs, wcsrv):
@@ -221,6 +222,7 @@ def join(zurl, autostart=_default_autostart()):  # -> WCFS
         wcsrv, fwcfs = _start(zurl, "-autoexit")
         wc = WCFS(mntpt, fwcfs, wcsrv)
+        _wcautostarted.append(wc)

     assert mntpt not in _wcregistry
     _wcregistry[mntpt] = wc
...
@@ -24,7 +24,8 @@
 # Package _wcfs provides Python-wrappers for C++ wcfs client package.
 #
 # It wraps WCFS/Conn/FileH/Mapping and WatchLink to help client_test.py unit-test
-# WCFS base-layer mmap functionality.
+# WCFS base-layer mmap functionality. At functional level WCFS client (and especially
+# pinner) is verified when running wendelin.core array tests in wcfs mode.

 from golang cimport chan, structZ, string, error, refptr
 from golang cimport context, cxx
...
@@ -20,6 +20,9 @@
 """client_test.py unit-tests virtmem layer provided by wcfs client.

 WCFS filesystem itself is unit-tested by wcfs/wcfs_test.py .
+
+At functional level, the whole wendelin.core test suite is used to verify
+wcfs.py/wcfs.go while running tox tests in wcfs mode.
 """

 from __future__ import print_function, absolute_import
...
@@ -240,7 +240,7 @@
 // process, do not see changes made to the first mapping.
 //
 // Since wendelin.core needs to provide coherency in between different slices
-// of the same array, this is the mode wendelin.core will actually use.
+// of the same array, this is the mode wendelin.core actually uses.
 //
 // 3. write to wcfs
 //
...
@@ -21,6 +21,9 @@
 Virtmem layer provided by wcfs client package is unit-tested by
 wcfs/client/client_test.py .
+
+At functional level, the whole wendelin.core test suite is used to verify
+wcfs.py/wcfs.go while running tox tests in wcfs mode.
 """

 from __future__ import print_function, absolute_import
@@ -495,7 +498,7 @@ class tDB(tWCFS):
         for zf, zfDelta in t._changed.items():
             dfile = DFile()
             zconns.add(zf._p_jar)
-            zfh = zf.fileh_open()   # NOTE does not use wcfs
+            zfh = zf.fileh_open(_use_wcfs=False)
             for blk, data in zfDelta.iteritems():
                 dfile.ddata[blk] = data
                 data += b'\0'*(zf.blksize - len(data))  # trailing \0
...