Commit 9b97e435 authored by Kirill Smelkov

.

parent 3febc81c
@@ -540,6 +540,9 @@ type BigFile struct {
 	// XXX mappings where client(s) requested isolation guarantee
 	//mappings ... XXX -> watchers?
+
+	// watchers attached to this file
+	watchers map[*Watcher]struct{}
 }

 // blkLoadState represents a ZBlk load state/result.
@@ -997,6 +1000,7 @@ func (f *BigFile) Read(_ nodefs.File, dest []byte, off int64, fctx *fuse.Context
 	if re := end % f.blksize; re != 0 {
 		aend += f.blksize - re
 	}
+	// XXX use original dest if it can fit the data
 	dest = make([]byte, aend - aoff) // ~> [aoff:aend) in file

 	// XXX better ctx = transaction.PutIntoContext(ctx, txn)
@@ -1017,9 +1021,7 @@ func (f *BigFile) Read(_ nodefs.File, dest []byte, off int64, fctx *fuse.Context
 	err := wg.Wait()
 	if err != nil {
-		// XXX -> err2LogStatus
-		log.Errorf("%s", err) // XXX + /bigfile/XXX: read [a,b): -> ...
-		return nil, fuse.EIO
+		return nil, err2LogStatus(err)
 	}

 	return fuse.ReadResultData(dest[off-aoff:end-aoff]), fuse.OK
@@ -1030,9 +1032,9 @@ func (f *BigFile) Read(_ nodefs.File, dest []byte, off int64, fctx *fuse.Context
 // see "7) when we receive a FUSE read(#blk) request ..." in overview.
 //
 // len(dest) == blksize.
-func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) error {
-	// XXX errctx?
-	// XXX locking
+// called with head.zconnMu rlocked.
+func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err error) {
+	defer xerr.Contextf(&err, "%s: readblk #%d", f.path(), blk)

 	// check if someone else is already loading this block
 	f.loadMu.Lock()
@@ -1053,66 +1055,32 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) error {
 		case <-loading.ready:
 			if loading.err == nil {
-				copy(dest, loading.blkdata)
+				copy(dest, loading.blkdata) // XXX copy
 			}
 			return loading.err
 		}
 	}

 	// noone was loading - we became responsible to load this block
-	zfile := f.zfile
-	blkdata, treepath, pathRevMax, err := zfile.LoadBlk(ctx, blk)
+	blkdata, treepath, pathRevMax, err := f.zfile.LoadBlk(ctx, blk)

 	loading.blkdata = blkdata
 	loading.err = err
-	close(loading.ready)
-
-	// only head/ has δbtree index.
-	if f.head.rev == 0 {
-		bfdir := f.head.bfdir
-		bfdir.indexMu.Lock()	// XXX locking correct?
-		bfdir.indexLooked.Add(f, treepath)
-		bfdir.indexMu.Unlock()
-	}
-
-	// XXX before loading.ready?
-	blkrevmax, _ := f.δFtail.LastRevOf(blk, zfile.PJar().At())
-	blkrevmax = tidmin(blkrevmax, pathRevMax)
-
-	/*
-	// XXX remmapping - only if head.rev == 0
-	// XXX -> own func?
-	// XXX locking
-	for _, mapping := range f.mappings {
-		if revmax <= mapping.at || !mapping.blkrange.in(blk) {
-			continue // do nothing
-		}
-		if mapping.pinned.Contains(blk) {
-			continue // do nothing
-		}
-
-		rev = max(δFtail.by(blk) : _ <= mapping.at)
-
-		// XXX vvv -> go
-		client.remmap(mapping.addr[blk], file/@<rev>/data)
-		mapping.pinned.Add(blk)
-	}
-	*/

 	// data loaded with error - cleanup .loading
 	if loading.err != nil {
+		close(loading.ready)
 		f.loadMu.Lock()
 		delete(f.loading, blk)
 		f.loadMu.Unlock()
 		return err
 	}

-	// data loaded ok
-	copy(dest, blkdata)
+	// we have the data - it can be used after watchers are updated
+	f.updateWatchers(blk, treepath, pathRevMax)
+
+	// data can be used now
+	close(loading.ready)
+	copy(dest, blkdata) // XXX copy

 	// store to kernel pagecache whole block that we've just loaded from database.
 	// This way, even if the user currently requested to read only small portion from it,
@@ -1135,6 +1103,49 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) error {
 	return nil
 }

+// updateWatchers complements readBlk and update watchers of the file after a
+// block was loaded from ZODB and before block data is returned to kernel.
+//
+// see "7.2) for all registered client@at watchers ..."
+func (f *BigFile) updateWatchers(blk int64, treepath []zodb.IPersistent, pathRevMax zodb.Tid) {
+	// only head/ is being watched for
+	if f.head.rev != 0 {
+		return
+	}
+
+	// update δbtree index
+	bfdir := f.head.bfdir
+	bfdir.indexMu.Lock()	// XXX locking correct?
+	bfdir.indexLooked.Add(f, treepath)
+	bfdir.indexMu.Unlock()
+
+	blkrevmax, _ := f.δFtail.LastRevOf(blk, f.zfile.PJar().At())	// XXX = f.head.zconn.At()
+	blkrevmax = tidmin(blkrevmax, pathRevMax)
+
+	/*
+	// XXX remmapping - only if head.rev == 0
+	// XXX -> own func?
+	// XXX locking
+	for _, mapping := range f.mappings {
+		if revmax <= mapping.at || !mapping.blkrange.in(blk) {
+			continue // do nothing
+		}
+		if mapping.pinned.Contains(blk) {
+			continue // do nothing
+		}
+
+		rev = max(δFtail.by(blk) : _ <= mapping.at)
+
+		// XXX vvv -> go
+		client.remmap(mapping.addr[blk], file/@<rev>/data)
+		mapping.pinned.Add(blk)
+	}
+	*/
+}
+
 // uploadBlk complements readBlk and uploads loaded blkdata into OS cache.
 func (f *BigFile) uploadBlk(blk int64, loading *blkLoadState) {
 	head := f.head
...
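
For reference, the readBlk path above serializes concurrent loads of the same block through the per-block loading map and the ready channel of blkLoadState. Below is a minimal standalone sketch of that pattern; loadState, bigFile and loadFromDB are illustrative names only, and the placement of close(loading.ready) is simplified compared to the real code, which closes it after cleanup on error and after updateWatchers on success.

```go
package main

import (
	"fmt"
	"sync"
)

// loadState mirrors blkLoadState: the result of one load, shared by all waiters.
type loadState struct {
	ready   chan struct{} // closed when the load completes
	blkdata []byte
	err     error
}

type bigFile struct {
	loadMu  sync.Mutex
	loading map[int64]*loadState // blk -> in-progress/done load, if any
}

// readBlk returns data for blk, loading it only once even under concurrent calls.
func (f *bigFile) readBlk(blk int64) ([]byte, error) {
	// check if someone else is already loading this block
	f.loadMu.Lock()
	loading, already := f.loading[blk]
	if !already {
		loading = &loadState{ready: make(chan struct{})}
		f.loading[blk] = loading
	}
	f.loadMu.Unlock()

	if already {
		<-loading.ready // wait for the other loader to finish
		return loading.blkdata, loading.err
	}

	// noone was loading - we became responsible to load this block
	loading.blkdata, loading.err = loadFromDB(blk)
	if loading.err != nil {
		// cleanup .loading so that a later read retries the load
		f.loadMu.Lock()
		delete(f.loading, blk)
		f.loadMu.Unlock()
	}
	close(loading.ready) // publish the result to waiters
	return loading.blkdata, loading.err
}

// loadFromDB stands in for zfile.LoadBlk (hypothetical data source).
func loadFromDB(blk int64) ([]byte, error) {
	return []byte(fmt.Sprintf("data#%d", blk)), nil
}

func main() {
	f := &bigFile{loading: make(map[int64]*loadState)}
	data, err := f.readBlk(3)
	fmt.Println(string(data), err)
}
```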
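
The signature change from error to (err error) together with the deferred xerr.Contextf call decorates whatever error readBlk returns with the file path and block number. The sketch below shows the same deferred-decoration idea using only the standard library; errContextf is a local stand-in written for illustration, not the actual go123/xerr API, and the path and error text are made up.

```go
package main

import (
	"errors"
	"fmt"
)

// errContextf prepends the formatted prefix to *errp if it is non-nil,
// mimicking what the deferred xerr.Contextf call does for readBlk's error.
func errContextf(errp *error, format string, argv ...interface{}) {
	if *errp != nil {
		*errp = fmt.Errorf("%s: %w", fmt.Sprintf(format, argv...), *errp)
	}
}

// readBlk is a toy stand-in whose load always fails, to show the decoration.
func readBlk(path string, blk int64) (err error) {
	defer errContextf(&err, "%s: readblk #%d", path, blk)
	return errors.New("deadline exceeded")
}

func main() {
	fmt.Println(readBlk("bigfile/0001", 3))
	// prints: bigfile/0001: readblk #3: deadline exceeded
}
```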