Commit bfb2a7ee authored by Kirill Smelkov

.

parent 807d79ae
@@ -97,6 +97,73 @@ func (m *tMaster) Run(ctx context.Context) error {
// ----------------------------------------
/*
func TestMasterStorage0(t0 *testing.T) {
t := NewTestCluster(t0, "abc1")
defer t.Stop()
M := t.NewMaster("m")
S := t.NewStorage("s") //, "m:1") // XXX do we need to provide Mlist here?
C := t.NewClient("c")
tM := t.Checker("m.main")
tS := t.Checker("s.main")
tMS := t.Checker("m-s")
tSM := t.Checker("s-m")
// M starts listening
tM.Expect(netlisten("m:1"))
tM.Expect(δnode("m", "m:1", proto.MASTER, 1, proto.RUNNING, proto.IdTimeNone))
tM.Expect(clusterState("m", proto.ClusterRecovering))
// TODO create C; C tries to connect to master - rejected ("not yet operational")
// S starts listening
tS.Expect(netlisten("s:1"))
// S connects M
tSM.Expect(netconnect("s:2", "m:2", "m:1"))
tSM.Expect(conntx("s:2", "m:2", 1, &proto.RequestIdentification{
NodeType: proto.STORAGE,
UUID: 0,
Address: xnaddr("s:1"),
ClusterName: "abc1",
IdTime: proto.IdTimeNone,
}))
tM.Expect(δnode("m", "s:1", proto.STORAGE, 1, proto.PENDING, 0.01))
tSM.Expect(conntx("m:2", "s:2", 1, &proto.AcceptIdentification{
NodeType: proto.MASTER,
MyUUID: proto.UUID(proto.MASTER, 1),
NumPartitions: 1,
NumReplicas: 0,
YourUUID: proto.UUID(proto.STORAGE, 1),
}))
// TODO test ID rejects (uuid already registered, ...)
// M starts recovery on S
tMS.Expect(conntx("m:2", "s:2", 0, &proto.Recovery{}))
tMS.Expect(conntx("s:2", "m:2", 0, &proto.AnswerRecovery{
// empty new node
PTid: 0,
BackupTid: proto.INVALID_TID,
TruncateTid: proto.INVALID_TID,
}))
tMS.Expect(conntx("m:2", "s:2", 2, &proto.AskPartitionTable{}))
tMS.Expect(conntx("s:2", "m:2", 2, &proto.AnswerPartitionTable{
PTid: 0,
RowList: []proto.RowInfo{},
}))
// M ready to start: new cluster, no in-progress S recovery
tM.Expect(masterStartReady("m", true))
}
*/
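
// The Expect-style checks above rely on tracetest's synchronous channels:
// the traced node blocks in Send until the test side receives and verifies
// the event. Below is a minimal self-contained model of that handshake
// (an illustration only, not the real tracetest API; assumes "fmt" and
// "reflect" are imported):

// exMsg carries one trace event plus an ack channel the producer blocks on.
type exMsg struct {
	event interface{}
	ack   chan struct{}
}

// exSyncChan delivers events synchronously: Send returns only after the
// checking side has received and acknowledged the event.
type exSyncChan struct{ msgq chan *exMsg }

func newExSyncChan() *exSyncChan { return &exSyncChan{msgq: make(chan *exMsg)} }

func (ch *exSyncChan) Send(event interface{}) {
	m := &exMsg{event, make(chan struct{})}
	ch.msgq <- m
	<-m.ack // wait until the checker has verified the event
}

// Expect receives the next event and verifies it matches expected.
func (ch *exSyncChan) Expect(expected interface{}) error {
	m := <-ch.msgq
	defer close(m.ack) // unblock the producer
	if !reflect.DeepEqual(m.event, expected) {
		return fmt.Errorf("expect: got %v; want %v", m.event, expected)
	}
	return nil
}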
// M drives cluster with 1 S & C through recovery -> verification -> service -> shutdown
func TestMasterStorage(t *testing.T) {
rt := NewEventRouter()
@@ -117,7 +184,7 @@ func TestMasterStorage(t *testing.T) {
cM := tracetest.NewSyncChan("m.main") // trace of events local to M
cS := tracetest.NewSyncChan("s.main") // trace of events local to S XXX with cause root also on S
// cC := tracetest.NewSyncChan("c.main")
cC := tracetest.NewSyncChan("c.main")
cMS := tracetest.NewSyncChan("m-s") // trace of events with cause root being m -> s send
cSM := tracetest.NewSyncChan("s-m") // trace of events with cause root being s -> m send
cMC := tracetest.NewSyncChan("m-c") // ----//---- m -> c
@@ -126,7 +193,7 @@ func TestMasterStorage(t *testing.T) {
tM := tracetest.NewEventChecker(t, dispatch, cM)
tS := tracetest.NewEventChecker(t, dispatch, cS)
// tC := tracetest.NewEventChecker(t, dispatch, cC) // XXX no need
tC := tracetest.NewEventChecker(t, dispatch, cC)
tMS := tracetest.NewEventChecker(t, dispatch, cMS)
tSM := tracetest.NewEventChecker(t, dispatch, cSM)
tMC := tracetest.NewEventChecker(t, dispatch, cMC)
@@ -139,8 +206,9 @@ func TestMasterStorage(t *testing.T) {
rt.BranchLink("s-m", cSM, cMS)
rt.BranchLink("c-m", cCM, cMC)
rt.BranchLink("c-s", cCS, rt.defaultq /* S never pushes to C */)
rt.BranchState("s", cMS) // state on S is controlled by M
rt.BranchState("c", cMC) // state on C is controlled by M
// rt.BranchState("s", cMS) // state on S is controlled by M
// rt.BranchState("c", cMC) // state on C is controlled by M
rt.BranchNode("c", cC)
// cluster nodes
M := tNewMaster("abc1", ":1", Mhost)
@@ -345,9 +413,9 @@ func TestMasterStorage(t *testing.T) {
},
}))
tMC.Expect(δnode("c", "m:1", proto.MASTER, 1, proto.RUNNING, proto.IdTimeNone))
tMC.Expect(δnode("c", "s:1", proto.STORAGE, 1, proto.RUNNING, 0.01))
tMC.Expect(δnode("c", "", proto.CLIENT, 1, proto.RUNNING, 0.02))
tC.Expect(δnode("c", "m:1", proto.MASTER, 1, proto.RUNNING, proto.IdTimeNone))
tC.Expect(δnode("c", "s:1", proto.STORAGE, 1, proto.RUNNING, 0.01))
tC.Expect(δnode("c", "", proto.CLIENT, 1, proto.RUNNING, 0.02))
// ----------------------------------------
......
// Copyright (C) 2017-2018 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package neo
// infrastructure for creating NEO test clusters.
import (
"testing"
"lab.nexedi.com/kirr/go123/xnet/pipenet"
)
// TestCluster ... XXX
type TestCluster struct {
name string
net *pipenet.Network // XXX -> lo
gotracer *TraceCollector // XXX -> GoTracer
//tpy *PyTracer
ttest testing.TB // original testing env this cluster was created at
}
// XXX stub
type ITestMaster interface {}
type ITestStorage interface {}
type ITestClient interface {}
// NewTestCluster creates a new NEO test cluster.
//
// XXX ...
//
// XXX defer t.Stop()
func NewTestCluster(ttest testing.TB, name string) *TestCluster {
return &TestCluster{
name: name,
//... XXX
ttest: ttest,
}
}
// Stop stops the cluster.
//
// All processes of the cluster are stopped ... XXX
// XXX do we need error return?
func (t *TestCluster) Stop() error {
//... XXX
t.gotracer.Detach()
//XXX t.pytracer.Detach()
return nil
}
// NewMaster creates a new master on node.
//
// The master will accept incoming connections at node:1.
// The node must not already exist; it will be fully dedicated to the created master. XXX
//
// XXX error of creating py process?
func (t *TestCluster) NewMaster(node string) ITestMaster {
//... XXX
// XXX check name is unique host name - not already registered
// XXX set M clock to vclock.monotime
// tracetest.NewSyncChan("m.main")
// foreach node1,node2:
// tracetest.NewChan("node1-node2") // trace of events with cause root being n1 -> n2 send
// tracetest.NewChan("node2-node1") // trace of events with cause root being n2 -> n1 send
// for each created tracetest.Chan -> create tracetest.EventChecker
//rt.BranchNode("m", cM)
//rt.BranchState("m",
//rt.BranchLink("n1-n2", ..., ...)
// XXX state on S,C is controlled by M:
// rt.BranchState("s", cMS)
return nil
}
func (t *TestCluster) NewStorage(node string) ITestStorage {
panic("TODO")
}
func (t *TestCluster) NewClient(node string) ITestClient {
panic("TODO")
}
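
// Once the stubs above are filled in, the intended usage would mirror the
// commented-out TestMasterStorage0 sketch from master_test.go; a hypothetical
// example (Checker is the per-stream event checker constructor assumed there):
//
//	func TestExample(t0 *testing.T) {
//		t := NewTestCluster(t0, "abc1")
//		defer t.Stop()
//
//		M := t.NewMaster("m")  // master accepting connections at m:1
//		S := t.NewStorage("s") // storage at s:1
//		C := t.NewClient("c")
//		_, _, _ = M, S, C
//
//		tM := t.Checker("m.main")
//		tM.Expect(netlisten("m:1"))
//	}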
@@ -154,6 +154,13 @@ type EventRouter struct {
byNode map[string /*host*/]*tracetest.SyncChan
// state on host changes. Takes precedence over byNode.
//
// XXX not needed? ( I was once considering routing state change events
// on C to MC, because a state change on C is caused by a send from M.
// However everything is correct if we put those C state changes onto
// byNode("C") and simply verify events on tMC and then tC, in that order.
// Keeping events local to C on tC, not on tMC, helps TestCluster
// organize trace channels in a uniform way. )
byState map[string /*host*/]*tracetest.SyncChan
// event on a-b link
@@ -264,7 +271,7 @@ func (r *EventRouter) Route(event interface{}) (dst *tracetest.SyncChan) {
break // link not branched
}
// now as we ldst.a corresponds to who was dialer and ldst.b
// now as ldst.a corresponds to who was dialer and ldst.b
// corresponds to who was listener, we can route by ConnID.
// (see neo.newNodeLink for details)
if ev.ConnID % 2 == 1 {
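
// The parity check above encodes who initiated the connection: per the
// convention in neo.newNodeLink, one side of a link allocates odd ConnIDs
// and the other even ones, so an event on the a-b link can be routed to
// the a->b or b->a trace channel from its ConnID alone. A hypothetical
// standalone sketch of that rule (assuming dialer = odd; the name and
// exact branch assignment are illustrative, not the actual Route code):

func routeByConnID(connID uint32, ab, ba *tracetest.SyncChan) *tracetest.SyncChan {
	if connID%2 == 1 {
		return ab // connection was initiated by the dialer (a)
	}
	return ba // connection was initiated by the listener (b)
}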
@@ -315,6 +322,8 @@ func (r *EventRouter) BranchNode(host string, dst *tracetest.SyncChan) {
}
// BranchState branches events corresponding to state changes on host.
//
// XXX not needed?
func (r *EventRouter) BranchState(host string, dst *tracetest.SyncChan) {
r.mu.Lock()
defer r.mu.Unlock()
......
@@ -18,6 +18,7 @@
// See https://www.nexedi.com/licensing for rationale and options.
package neo
// misc testing utilities.
import (
"context"
......
// Copyright (C) 2018 Nexedi SA and Contributors.
// Kirill Smelkov <kirr@nexedi.com>
//
// This program is free software: you can Use, Study, Modify and Redistribute
// it under the terms of the GNU General Public License version 3, or (at your
// option) any later version, as published by the Free Software Foundation.
//
// You can also Link and Combine this program with other software covered by
// the terms of any of the Free Software licenses or any of the Open Source
// Initiative approved licenses and Convey the resulting work. Corresponding
// source of such a combination shall include the source code for all other
// software used.
//
// This program is distributed WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
//
// See COPYING file for full licensing terms.
// See https://www.nexedi.com/licensing for rationale and options.
package neo
// NEO/py event tracer
// TODO
@@ -349,6 +349,29 @@ func (d *EventDispatcher) Dispatch(event interface{}) {
outch := d.rt.Route(event)
// XXX if nil?
// TODO it is possible to empirically detect here if a test incorrectly
// decomposed its system into serial streams: consider two unrelated
// events A and B that are incorrectly routed to the same channel. It
// could happen that the order of checks on the test side is almost
// always correct, so the error stays invisible. However,
//
// if we add delays to the delivery of either A or B
// and test both combinations
//
// we will detect the error for sure: if A and B are indeed unrelated,
// one of the delay combinations will deliver the events to the test in
// a different order than it expects.
//
// the delay time could be chosen as follows:
//
// - run the test without delays; collect δt between events on a particular stream
// - take delay = max(δt)·10
//
// to make sure no different orderings are indeed possible on the
// stream, rerun the test N(events-on-stream) times, delaying the
// i'th event during the i'th run.
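//
// A rough sketch of that delay-injection idea, assuming a replay hook
// where delivery of the i'th event on a stream can be postponed
// (calibrate and deliver are hypothetical helpers, not part of
// EventDispatcher):
//
//	// calibrate derives the delay from an undelayed reference run:
//	// delay = max(δt)·10 over gaps between consecutive events.
//	func calibrate(eventTimes []time.Time) time.Duration {
//		var maxδt time.Duration
//		for i := 1; i < len(eventTimes); i++ {
//			if δt := eventTimes[i].Sub(eventTimes[i-1]); δt > maxδt {
//				maxδt = δt
//			}
//		}
//		return 10 * maxδt
//	}
//
//	// deliver replays events to dst, postponing only the idelay'th one.
//	// Rerunning with idelay = 0..len(events)-1 perturbs every event once;
//	// if the stream is truly serial, no rerun changes the observed order.
//	func deliver(events []interface{}, dst chan<- interface{}, idelay int, d time.Duration) {
//		for i, ev := range events {
//			if i == idelay {
//				time.Sleep(d)
//			}
//			dst <- ev
//		}
//	}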
// TODO timeout: deadlock? (print all in-flight events on timeout)
// XXX or is it better to do ^^^ on the receiver side?
//
......