Commit eac5af84 authored by Kirill Smelkov

.

parent bcd4aed8
...
@@ -1046,7 +1046,8 @@ func (f *BigFile) invalidateBlk(ctx context.Context, blk int64) (err error) {
 	if int64(len(blkdata)) == blksize {
 		func() {
 			// store retrieved data back to OS cache for file @<rev>/file[blk]
-			blkrev, _ := f.LastBlkRev(ctx, blk, f.head.zconn.At())
+			δFtail := f.head.bfdir.δFtail
+			blkrev, _ := δFtail.LastBlkRev(ctx, f, blk, f.head.zconn.At())
 			frev, funlock, err := groot.lockRevFile(blkrev, f.zfile.POid())
 			if err != nil {
 				log.Errorf("BUG: %s: invalidate blk #%d: %s (ignoring, but reading @revX/bigfile will be slow)", f.path(), blk, err)
@@ -1462,8 +1463,9 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
 	// (δFtail is just for δZ -> δF invalidation handling and is needed without isolation protocol)
 	// XXX ^^^ no - also need to query to send pins
 	bfdir := f.head.bfdir
+	δFtail := bfdir.δFtail
 	bfdir.δFmu.Lock()	// XXX locking correct? XXX -> better push down?
-	bfdir.δFtail.Track(f, blk, treepath, zblk)	// XXX pass in zblk.rev here?
+	δFtail.Track(f, blk, treepath, zblk)	// XXX pass in zblk.rev here?
 	bfdir.δFmu.Unlock()
 	// make sure that file[blk] on clients side stays as of @w.at state.
@@ -1497,7 +1499,7 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
 	// we'll relock atMu again and recheck blkrev vs w.at after.
 	w.atMu.RUnlock()
-	blkrev, _ = f.LastBlkRev(ctx, blk, f.head.zconn.At())
+	blkrev, _ = δFtail.LastBlkRev(ctx, f, blk, f.head.zconn.At())
 	blkrevRough = false
 	w.atMu.RLock()
@@ -1513,7 +1515,8 @@ func (f *BigFile) readPinWatchers(ctx context.Context, blk int64, treepath []btr
 	// and most of them would be on different w.at - cache of the file will
 	// be lost. Via pinning to particular block revision, we make sure the
 	// revision to pin is the same on all clients, and so file cache is shared.
-	pinrev, _ := w.file.LastBlkRev(ctx, blk, w.at)	// XXX move into go?
+	pinrev, _ := δFtail.LastBlkRev(ctx, w.file, blk, w.at)	// XXX move into go?
+	// XXX ^^^ w.file vs f ?
 	//fmt.Printf("S: read #%d: watch @%s: pin -> @%s\n", blk, w.at, pinrev)
 	wg.Go(func(ctx context.Context) error {
@@ -1657,14 +1660,15 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.T
 	// pin all tracked file blocks that were changed in (at, head] range.
 	toPin := map[int64]zodb.Tid{}	// blk -> @rev
-	for _, δfile := range bfdir.δFtail.SliceByFileRev(f, at, headAt) {	// XXX locking δFtail
+	δFtail := bfdir.δFtail
+	for _, δfile := range δFtail.SliceByFileRev(f, at, headAt) {	// XXX locking δFtail
 		for blk := range δfile.Blocks {
 			_, already := toPin[blk]
 			if already {
 				continue
 			}
-			toPin[blk], _ = f.LastBlkRev(ctx, blk, at)	// XXX err
+			toPin[blk], _ = δFtail.LastBlkRev(ctx, f, blk, at)	// XXX err
 		}
 	}
...
@@ -430,8 +430,7 @@ func (δFtail *ΔFtail) SliceByFileRev(file *BigFile, lo, hi zodb.Tid) /*readonl
 }
-// XXX -> ΔFtail.BlkRevAt (meth of ΔFtail, not file)
-// XXX -> BlkRevAt
+// XXX rename -> BlkRevAt
 // LastBlkRev returns last revision that changed file[blk] as of @at database state.
 //
 // if exact=False - what is returned is only an upper bound for last block revision.
@@ -441,10 +440,10 @@ func (δFtail *ΔFtail) SliceByFileRev(file *BigFile, lo, hi zodb.Tid) /*readonl
 // blk must be tracked
 //
 // XXX +ctx, error rebuild []δF here
-func (f *BigFile) LastBlkRev(ctx context.Context, blk int64, at zodb.Tid) (_ zodb.Tid, exact bool) {
+func (δFtail *ΔFtail) LastBlkRev(ctx context.Context, f *BigFile, blk int64, at zodb.Tid) (_ zodb.Tid, exact bool) {
 	//defer xerr.Contextf(&err, "")	// XXX text
-	δFtail := f.head.bfdir.δFtail
+	// XXX assert δFtail == f.head.bfdir.δFtail ?
 	// XXX tabRev -> treeRev ?
 	// XXX activate zfile?
...
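
For orientation, below is a minimal, self-contained sketch of the calling convention this commit introduces: LastBlkRev becomes a method of ΔFtail and receives the file as an explicit argument, instead of being a BigFile method that reaches for f.head.bfdir.δFtail itself. The types, fields, and revision values here are simplified stand-ins and not the real wcfs/zodb definitions; error handling, ctx, and locking are omitted.

// lastblkrev_sketch.go - hypothetical sketch of the new ΔFtail.LastBlkRev shape.
package main

import "fmt"

type Tid uint64 // stand-in for zodb.Tid

type BigFile struct{ name string }

type ΔFtail struct {
	// ascending revisions that changed each block of each tracked file
	byFile map[*BigFile]map[int64][]Tid
}

// LastBlkRev returns the last revision that changed file[blk] as of @at state.
// (sketch: the real wcfs method also takes ctx and reports whether the answer is exact)
func (δFtail *ΔFtail) LastBlkRev(f *BigFile, blk int64, at Tid) (Tid, bool) {
	last, ok := Tid(0), false
	for _, rev := range δFtail.byFile[f][blk] {
		if rev <= at {
			last, ok = rev, true
		}
	}
	return last, ok
}

func main() {
	f := &BigFile{name: "bigfile/0001"}
	δFtail := &ΔFtail{byFile: map[*BigFile]map[int64][]Tid{f: {4: {0x0100, 0x0200, 0x0300}}}}

	// call sites change from  f.LastBlkRev(ctx, blk, at)
	//                    to   δFtail.LastBlkRev(ctx, f, blk, at)
	blkrev, exact := δFtail.LastBlkRev(f, 4, 0x0250)
	fmt.Printf("blk #4 of %s last changed @%#x (exact=%v)\n", f.name, blkrev, exact)
}

One practical effect of the move is that every call site now names explicitly which ΔFtail it queries, rather than implicitly going through the file's head directory.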