Kirill Smelkov / neo

Commit 0f310080, authored Sep 06, 2017 by Kirill Smelkov
X fix Client.withOperational - was not working properly because initial .opReady was nil
parent ae1b31de

Showing 4 changed files with 52 additions and 23 deletions:

    go/neo/client/client.go            +48  -20
    go/neo/parttab.go                   +2   -1
    go/neo/server/master.go             +1   -1
    go/xcommon/xtesting/xtesting.go     +1   -1
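The failure mode hinges on a basic Go channel property: a receive from a nil channel blocks forever, so a waiter parked on an uninitialized .opReady could never be notified. Once the channel is made in NewClient, closing it releases every waiter at once. A minimal standalone sketch of that property (illustrative names, not the project's code):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Before the fix: Client.opReady started out as its zero value, i.e. nil.
	var opReady chan struct{}

	select {
	case <-opReady: // a receive from a nil channel blocks forever
		fmt.Println("never reached")
	case <-time.After(100 * time.Millisecond):
		fmt.Println("waiter is stuck: a nil channel can never become ready")
	}

	// After the fix: the channel is initialized in NewClient, and closing it
	// (as the sendReady func returned by updateOperational does) wakes every
	// waiter at once.
	opReady = make(chan struct{})
	go close(opReady)
	<-opReady
	fmt.Println("waiter released after close")
}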
go/neo/client/client.go

@@ -62,6 +62,8 @@ type Client struct {
 	// - .ClusterState = RUNNING	<- XXX needed?
 	//
 	// however master link is accessed separately (see ^^^ and masterLink)
+	//
+	// protected by .node.StateMu
 	operational bool          // XXX <- somehow move to NodeApp?
 	opReady     chan struct{} // reinitialized each time state becomes non-operational
 }
@@ -77,8 +79,10 @@ func (c *Client) StorageName() string {
 // It will connect to master @masterAddr and identify with sepcified cluster name.
 func NewClient(clusterName, masterAddr string, net xnet.Networker) *Client {
 	cli := &Client{
-		node:       neo.NewNodeApp(net, neo.CLIENT, clusterName, masterAddr, ""),
-		mlinkReady: make(chan struct{}),
+		node:        neo.NewNodeApp(net, neo.CLIENT, clusterName, masterAddr, ""),
+		mlinkReady:  make(chan struct{}),
+		operational: false,
+		opReady:     make(chan struct{}),
 	}

 	// spawn background process which performs master talk
@@ -125,6 +129,42 @@ func (c *Client) masterLink(ctx context.Context) (*neo.NodeLink, error) {
 	}
 }

+// updateOperational updates .operational from current state.
+//
+// Must be called with .node.StateMu lock held.
+//
+// Returned sendReady func must be called by updateOperational caller after
+// .node.StateMu lock is released - it will close current .opReady this way
+// notifying .operational waiters.
+//
+// XXX move somehow -> NodeApp?
+func (c *Client) updateOperational() (sendReady func()) {
+	// XXX py client does not wait for cluster state = running
+	operational := // c.node.ClusterState == neo.ClusterRunning &&
+		c.node.PartTab.OperationalWith(c.node.NodeTab)
+
+	//fmt.Printf("\nupdateOperatinal: %v\n", operational)
+	//fmt.Println(c.node.PartTab)
+	//fmt.Println(c.node.NodeTab)
+
+	var opready chan struct{}
+	if operational != c.operational {
+		c.operational = operational
+		if operational {
+			opready = c.opReady // don't close from under StateMu
+		} else {
+			c.opReady = make(chan struct{}) // remake for next operational waiters
+		}
+	}
+
+	return func() {
+		if opready != nil {
+			//fmt.Println("updateOperational - notifying %v\n", opready)
+			close(opready)
+		}
+	}
+}
+
 // withOperational waits for cluster state to be operational.
 //
 // If successful it returns with operational state RLocked (c.node.StateMu) and
@@ -141,6 +181,8 @@ func (c *Client) withOperational(ctx context.Context) error {
+		ready := c.opReady
 		c.node.StateMu.RUnlock()

+		//fmt.Printf("withOperational - waiting on %v\n", ready)
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
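Together these hunks form a broadcast pattern: updateOperational swaps in a fresh .opReady whenever the cluster leaves the operational state and returns a closure that closes the previous channel only after StateMu has been released, while withOperational snapshots the current channel under the lock before waiting on it. A condensed sketch of the same idea under assumed, generic names (opNotifier and its methods are illustrative, not part of the neo codebase):

package main

import (
	"context"
	"sync"
)

// opNotifier mimics the .operational/.opReady pair guarded by StateMu.
type opNotifier struct {
	mu          sync.RWMutex
	operational bool
	opReady     chan struct{} // reinitialized each time state becomes non-operational
}

func newOpNotifier() *opNotifier {
	// The channel must be initialized up front - a nil channel would hang waiters.
	return &opNotifier{opReady: make(chan struct{})}
}

// update must be called with mu held for writing; the returned func must be
// called after mu is released, so the close never happens under the lock.
func (n *opNotifier) update(operational bool) (sendReady func()) {
	var opready chan struct{}
	if operational != n.operational {
		n.operational = operational
		if operational {
			opready = n.opReady // close later, not from under the lock
		} else {
			n.opReady = make(chan struct{}) // remake for the next waiters
		}
	}
	return func() {
		if opready != nil {
			close(opready) // broadcast to everyone waiting on the old channel
		}
	}
}

// setOperational mirrors the call sites below (recvMaster/initFromMaster):
// change state under the lock, notify after unlocking.
func (n *opNotifier) setOperational(op bool) {
	n.mu.Lock()
	sendReady := n.update(op)
	n.mu.Unlock()
	sendReady()
}

// wait mirrors withOperational: snapshot the channel under the lock,
// release the lock, then wait for either readiness or cancellation.
func (n *opNotifier) wait(ctx context.Context) error {
	for {
		n.mu.RLock()
		if n.operational {
			n.mu.RUnlock() // the real code returns with the lock still RLocked
			return nil
		}
		ready := n.opReady
		n.mu.RUnlock()

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ready:
		}
	}
}

func main() {
	n := newOpNotifier()
	done := make(chan struct{})
	go func() {
		_ = n.wait(context.Background()) // blocks until setOperational(true)
		close(done)
	}()
	n.setOperational(true)
	<-done
}

Closing a channel works as the broadcast primitive here because, unlike sync.Cond, a channel can be combined with ctx.Done() in a single select.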
@@ -266,25 +308,9 @@ func (c *Client) recvMaster(ctx context.Context, mlink *neo.NodeLink) (err error
 		}

 		// update .operational + notify those who was waiting for it
-		// XXX py client does not wait for cluster state = running
-		operational := // c.node.ClusterState == neo.ClusterRunning &&
-			c.node.PartTab.OperationalWith(c.node.NodeTab)
-
-		var opready chan struct{}
-		if operational != c.operational {
-			c.operational = operational
-			if operational {
-				opready = c.opReady // don't close from under StateMu
-			} else {
-				c.opReady = make(chan struct{})
-			}
-		}
+		opready := c.updateOperational()
 		c.node.StateMu.Unlock()
-
-		if opready != nil {
-			close(opready)
-		}
+		opready()
 	}
 }
@@ -302,7 +328,9 @@ func (c *Client) initFromMaster(ctx context.Context, mlink *neo.NodeLink) (err e
 	log.Infof(ctx, "master initialized us with next parttab:\n%s", pt)
 	c.node.StateMu.Lock()
 	c.node.PartTab = pt
+	opready := c.updateOperational()
 	c.node.StateMu.Unlock()
+	opready()

 /*
 	XXX don't need this in init?
go/neo/parttab.go

@@ -164,7 +164,7 @@ func MakePartTab(np int, nodev []*Node) *PartitionTable {
 	for i, j := 0, 0; i < np; i, j = i+1, j+1%len(nodev) {
 		node := nodev[j]
 		// XXX assert node.State > DOWN
-		fmt.Printf("tab[%d] <- %v\n", i, node.UUID)
+		//fmt.Printf("tab[%d] <- %v\n", i, node.UUID)
 		tab[i] = []Cell{{CellInfo: CellInfo{node.UUID, UP_TO_DATE /*XXX ok?*/}}}
 	}
@@ -261,6 +261,7 @@ func (pt *PartitionTable) Dump() []RowInfo { // XXX also include .ptid? -> struc
 func PartTabFromDump(ptid PTid, rowv []RowInfo) *PartitionTable {
 	// reconstruct partition table from response
 	pt := &PartitionTable{}
+	pt.PTid = ptid
 	for _, row := range rowv {
 		i := row.Offset
go/neo/server/master.go

@@ -457,7 +457,6 @@ loop2:
 	// if we are starting for new cluster - create partition table
 	if m.node.PartTab.PTid == 0 {
-		log.Infof(ctx, "creating new partition table")
 		// XXX -> m.nodeTab.StorageList(State > DOWN)
 		storv := []*neo.Node{}
 		for _, stor := range m.node.NodeTab.StorageList() {
@@ -467,6 +466,7 @@ loop2:
 		}
 		m.node.PartTab = neo.MakePartTab(1 /* XXX hardcoded */, storv)
 		m.node.PartTab.PTid = 1
+		log.Infof(ctx, "creating new partition table: %s", m.node.PartTab)
 	}

 	return nil
go/xcommon/xtesting/xtesting.go

@@ -59,7 +59,7 @@ func NewSyncTracer() *SyncTracer {
 // Trace1 sends message with 1 tracing event to a consumer and waits for ack
 func (st *SyncTracer) Trace1(event interface{}) {
 	ack := make(chan struct{})
-	fmt.Printf("trace: send: %T %v\n", event, event)
+	//fmt.Printf("trace: send: %T %v\n", event, event)
 	st.tracech <- &SyncTraceMsg{event, ack}
 	<-ack
 }