wendelin.core · Commits

Commit f7a26f7d authored Jul 03, 2020 by Kirill Smelkov
.
parent 1efb5876
Showing 1 changed file with 264 additions and 0 deletions
wcfs/δbtail.go
...
...
@@ -946,6 +946,270 @@ func diffT(ctx context.Context, a, b *Tree, δZTC SetOid, trackIdx map[zodb.Oid]
	return δ, nil
}
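
// __diffT computes the difference between subtrees a and b as a map of
// per-key ΔValue entries: it first expands changed nodes of a down to the
// bucket level, then keeps iterating keys queued for the A and B sides,
// merging every per-bucket diff into δ, until neither side yields new keys.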
func __diffT(ctx context.Context, a, b *Tree, δZTC SetOid, trackIdx map[zodb.Oid]nodeTrack) (δ map[Key]ΔValue, err error) {
	tracef("  diffT %s %s\n", xidOf(a), xidOf(b))
	defer xerr.Contextf(&err, "diffT %s %s", xidOf(a), xidOf(b))

	δ = map[Key]ΔValue{}
	defer tracef("  -> δ: %v\n", δ)

	var av rangeSplit // nodes expanded from a
	var bv rangeSplit // nodes expanded from b

	Aqueue := SetKey{} // "to process" keys on A
	Bqueue := SetKey{} // "to process" keys on B
	Adone := SetKey{}  // "processed" keys on A
	Bdone := SetKey{}  // "processed" keys on B

	if b != nil { // XXX kill (always !nil) ?
		// XXX precise range as for a ^^^ ?
		btop := &nodeInRange{lo: KeyMin, hi_: KeyMax, node: b} // [-∞, ∞)
		bv = rangeSplit{btop}
	}

	// initial phase: expand changed nodes in a till buckets;
	// XXX changed buckets -> δ-
	if a != nil { // XXX kill (always !nil) ?
		// XXX maybe walk till a from root to get more precise initial range?
		atop := &nodeInRange{lo: KeyMin, hi_: KeyMax, node: a} // [-∞, ∞)
		av = rangeSplit{atop}

		aq := []*nodeInRange{atop} // stack
		for len(aq) > 0 {
			l := len(aq)
			arn := aq[l-1]; aq = aq[:l-1] // arn=aq.pop()
			atree := arn.node.(*Tree)     // ok - only trees in aq
			err = atree.PActivate(ctx); if err != nil { return nil, err }
			defer atree.PDeactivate()

			// empty tree - do not expand into bucket - only process tracked holes
			if len(atree.Entryv()) == 0 {
				// XXX dup wrt bucket processing?
				δA := map[Key]ΔValue{}
				track, ok := trackIdx[atree.POid()]
				if !ok {
					panicf("%s ∈ δZTC, but ∉ trackIdx", vnode(atree))
				}
				for k := range track.holes {
					δA[k] = ΔValue{VDEL, VDEL} // ø->ø indicates hole
				}

				// δ <- δA
				err = δMerge(δ, δA)
				if err != nil {
					return nil, err
				}

				// Adone <- δA
				// Bqueue <- δA
				for k := range δA {
					Adone.Add(k)
					Bqueue.Add(k)
				}

				arn.done = true
				continue
			}

			// normal tree - expand till buckets
			children := av.Expand(arn)
			for _, rchild := range children {
				coid := rchild.node.POid()
				if !(δZTC.Has(coid) ||
					/* embedded bucket */ (len(children) == 1 && coid == zodb.InvalidOid)) {
					continue
				}

				switch node := rchild.node.(type) {
				case *Tree:
					aq = append(aq, rchild)

				case *Bucket:
					δA, err := diffB(ctx, node, nil)
					if err != nil {
						return nil, err
					}

					// also -[k]ø (for tracked holes)
					track, ok := trackIdx[node.POid()]
					if !ok {
						panicf("%s ∈ δZTC, but ∉ trackIdx", vnode(node))
					}
					for k := range track.holes {
						δA[k] = ΔValue{VDEL, VDEL} // ø->ø indicates hole
					}

					// δ <- δA
					err = δMerge(δ, δA)
					if err != nil {
						return nil, err
					}

					// Adone <- δA
					// Bqueue <- δA
					for k := range δA {
						Adone.Add(k)
						Bqueue.Add(k)
					}

					rchild.done = true
				}
			}
		}
	}

	tracef("  av: %s\n", av)
	tracef("  bv: %s\n", bv)

	// phase 2: iterate through keys queued for A and B, delve into
	// corresponding nodes, and merge diff generated from them into δ.
	// Each delve for A or B, potentially adds new keys to process on the
	// other side.
	//
	// XXX inefficient: we process each key separately, while they can be
	// processed in sorted batches.
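	//
	// For illustration (keys here are arbitrary): if phase 1 found a changed
	// a-bucket covering keys 1 and 2, both keys are queued for B. Looking key 1
	// up in b may land in a b-bucket that also holds key 3; key 3 is then
	// queued back for A, so that its value on the a side is taken into account
	// as well. The loop below runs until neither side yields new keys.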
	for {
		tracef("\n")
		tracef("  aq: %s\n", Aqueue)
		tracef("  bq: %s\n", Bqueue)
		if len(Aqueue) == 0 && len(Bqueue) == 0 {
			break
		}

		// B queue
		// expand keys from new δA -> in B till buckets;
		// process B buckets that cover new keys into δ+
		Aqueue = SetKey{}
		for k := range Bqueue {
			tracef("    B [%v]\n", k)
			bnode, ok, err := bv.GetToLeaf(ctx, k)
			if err != nil {
				return nil, err
			}
			if !ok { // FIXME -> key must be included into some node.hole
				continue // key not covered
			}

			// +bucket if that bucket is reached for the first time
			if !bnode.done {
				var δB map[Key]ΔValue
				bbucket, ok := bnode.node.(*Bucket)
				if ok { // !ok means ø tree
					δB, err = diffB(ctx, nil, bbucket)
					if err != nil {
						return nil, err
					}
				}

				// δ <- δB
				err = δMerge(δ, δB)
				if err != nil {
					return nil, err
				}

				// Bdone <- δB
				// Aqueue <- δB
				for k_ := range δB {
					Bdone.Add(k_)
					if !Adone.Has(k_) {
						Aqueue.Add(k_)
					}
				}

				bnode.done = true
			}

			// k is not there -> +[k]ø
			if !Bdone.Has(k) {
				δB := map[Key]ΔValue{k: {VDEL, VDEL}} // [k]ø->ø
				err = δMerge(δ, δB); if err != nil { return nil, err }
				Bdone.Add(k)
			}

			tracef("    bv: %s\n", bv)
		}

		// FIXME update trackIdx
/*
		// update δc -> tracked keys
		// ca = nil -> add cb to tracked
		// cb = nil -> remove ca from tracked
		// (their siblings must be already processed by diffX call)
		if ca == nil {
			trackIdx[child] = nodeTrack{parent: b.POid(), trackedKeys: SetKey{}}
		}
		if cb == nil {
			delete(trackIdx, child) // XXX remove keys from parent?
		} else {
			trackedKeys := trackIdx[child].trackedKeys
			for k, δv := range δc {
				switch {
				case δv.Old == VDEL:
					trackedKeys.Add(k)

				case δv.New == VDEL:
					trackedKeys.Add(k)

				// k v1->v2	no change in key
				}
			}
		}
*/

		// A queue
		Bqueue = SetKey{}
		for k := range Aqueue {
			tracef("    A [%v]\n", k)
			anode, ok, err := av.GetToLeaf(ctx, k)
			if err != nil {
				return nil, err
			}
			if !ok { // FIXME -> key must be included into some node.hole
				continue // key not covered
			}

			// XXX check for anode.node.(*Tree) (ø tree case)

			// - bucket if that bucket is reached for the first time
			if !anode.done {
				δA, err := diffB(ctx, anode.node.(*Bucket), nil)
				if err != nil {
					return nil, err
				}
				// XXX also extract holes

				// δ <- δA
				err = δMerge(δ, δA)
				if err != nil {
					return nil, err
				}

				// Adone <- δA
				// Bqueue <- δA
				for k_ := range δA {
					Adone.Add(k_)
					if !Bdone.Has(k_) {
						Bqueue.Add(k_)
					}
				}

				anode.done = true
			}

			// k is not there -> -[k]ø
			if !Adone.Has(k) {
				// XXX do we need to add(?) [k]ø->ø ?
				Adone.Add(k)
			}

			tracef("    av: %s\n", av)
		}
	}

	return δ, nil
}

// δMerge merges changes from δ2 into δ.
// δ is total-building diff, while δ2 is diff from comparing some subnodes.
...
...
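
The hunk ends just as the documentation of δMerge begins. For orientation, below is a minimal, hypothetical sketch of a merge with the semantics the code above relies on: δMerge(δ, δ2) folds δ2 into δ, and ΔValue{VDEL, VDEL} marks a tracked hole. The name mergeΔ, the conflict policy, and the use of fmt.Errorf are assumptions for illustration, not the commit's actual δMerge; Key, ΔValue and VDEL are the package's own types, and ΔValue is assumed to be a comparable {Old, New} pair.

// mergeΔ is an illustrative stand-in for δMerge: fold δ2 into δ.
// A ø->ø entry (tracked hole) carries no value information, so it does not
// conflict with a real change; two different real changes for the same key
// are reported as an error.
func mergeΔ(δ, δ2 map[Key]ΔValue) error {
	for k, v2 := range δ2 {
		v1, already := δ[k]
		if !already {
			δ[k] = v2 // key seen for the first time
			continue
		}
		hole1 := (v1.Old == VDEL && v1.New == VDEL)
		hole2 := (v2.Old == VDEL && v2.New == VDEL)
		switch {
		case hole2:
			// δ2 only marks a hole - keep δ[k] as is
		case hole1:
			δ[k] = v2 // a real change overrides a hole marker
		case v1 != v2:
			return fmt.Errorf("δ merge conflict: [%v] %v ≠ %v", k, v1, v2)
		}
	}
	return nil
}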