Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
M
MariaDB
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
nexedi
MariaDB
Commits
c1f04bf0
Commit
c1f04bf0
authored
May 09, 2005
by
joreland@mysql.com
Browse files
Options
Browse Files
Download
Plain Diff
Merge joreland@bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/home/jonas/src/mysql-4.1
parents
12621f34
a36b2e39
Changes
24
Hide whitespace changes
Inline
Side-by-side
Showing
24 changed files
with
350 additions
and
585 deletions
+350
-585
ndb/src/cw/cpcd/APIService.cpp
ndb/src/cw/cpcd/APIService.cpp
+1
-0
ndb/src/cw/cpcd/CPCD.hpp
ndb/src/cw/cpcd/CPCD.hpp
+6
-0
ndb/src/cw/cpcd/Process.cpp
ndb/src/cw/cpcd/Process.cpp
+8
-1
ndb/src/kernel/blocks/dbtc/Dbtc.hpp
ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+2
-28
ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+1
-3
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+40
-48
ndb/src/kernel/error/ErrorReporter.cpp
ndb/src/kernel/error/ErrorReporter.cpp
+3
-2
ndb/src/mgmapi/mgmapi.cpp
ndb/src/mgmapi/mgmapi.cpp
+6
-0
ndb/src/ndbapi/ClusterMgr.cpp
ndb/src/ndbapi/ClusterMgr.cpp
+7
-6
ndb/src/ndbapi/ClusterMgr.hpp
ndb/src/ndbapi/ClusterMgr.hpp
+14
-0
ndb/src/ndbapi/ndberror.c
ndb/src/ndbapi/ndberror.c
+1
-1
ndb/test/include/CpcClient.hpp
ndb/test/include/CpcClient.hpp
+1
-0
ndb/test/ndbapi/testNodeRestart.cpp
ndb/test/ndbapi/testNodeRestart.cpp
+1
-1
ndb/test/run-test/Makefile.am
ndb/test/run-test/Makefile.am
+6
-1
ndb/test/run-test/conf-daily-basic-dl145a.txt
ndb/test/run-test/conf-daily-basic-dl145a.txt
+19
-0
ndb/test/run-test/conf-daily-basic-ndbmaster.txt
ndb/test/run-test/conf-daily-basic-ndbmaster.txt
+19
-0
ndb/test/run-test/conf-daily-basic-shark.txt
ndb/test/run-test/conf-daily-basic-shark.txt
+19
-0
ndb/test/run-test/conf-daily-devel-ndbmaster.txt
ndb/test/run-test/conf-daily-devel-ndbmaster.txt
+19
-0
ndb/test/run-test/conf-daily-sql-ndbmaster.txt
ndb/test/run-test/conf-daily-sql-ndbmaster.txt
+20
-0
ndb/test/run-test/main.cpp
ndb/test/run-test/main.cpp
+21
-22
ndb/test/run-test/make-config.sh
ndb/test/run-test/make-config.sh
+81
-445
ndb/test/run-test/ndb-autotest.sh
ndb/test/run-test/ndb-autotest.sh
+51
-27
ndb/test/run-test/run-test.hpp
ndb/test/run-test/run-test.hpp
+1
-0
ndb/test/src/CpcClient.cpp
ndb/test/src/CpcClient.cpp
+3
-0
No files found.
ndb/src/cw/cpcd/APIService.cpp
View file @
c1f04bf0
...
...
@@ -122,6 +122,7 @@ ParserRow<CPCDAPISession> commands[] =
CPCD_ARG
(
"stderr"
,
String
,
Optional
,
"Redirection of stderr"
),
CPCD_ARG
(
"stdin"
,
String
,
Optional
,
"Redirection of stderr"
),
CPCD_ARG
(
"ulimit"
,
String
,
Optional
,
"ulimit"
),
CPCD_ARG
(
"shutdown"
,
String
,
Optional
,
"shutdown options"
),
CPCD_CMD
(
"undefine process"
,
&
CPCDAPISession
::
undefineProcess
,
""
),
CPCD_CMD_ALIAS
(
"undef"
,
"undefine process"
,
0
),
...
...
ndb/src/cw/cpcd/CPCD.hpp
View file @
c1f04bf0
...
...
@@ -243,6 +243,12 @@ public:
* @desc Format c:unlimited d:0 ...
*/
BaseString
m_ulimit
;
/**
* @brief shutdown options
*/
BaseString
m_shutdown_options
;
private:
class
CPCD
*
m_cpcd
;
void
do_exec
();
...
...
ndb/src/cw/cpcd/Process.cpp
View file @
c1f04bf0
...
...
@@ -44,6 +44,8 @@ CPCD::Process::print(FILE * f){
fprintf
(
f
,
"stdout: %s
\n
"
,
m_stdout
.
c_str
()
?
m_stdout
.
c_str
()
:
""
);
fprintf
(
f
,
"stderr: %s
\n
"
,
m_stderr
.
c_str
()
?
m_stderr
.
c_str
()
:
""
);
fprintf
(
f
,
"ulimit: %s
\n
"
,
m_ulimit
.
c_str
()
?
m_ulimit
.
c_str
()
:
""
);
fprintf
(
f
,
"shutdown: %s
\n
"
,
m_shutdown_options
.
c_str
()
?
m_shutdown_options
.
c_str
()
:
""
);
}
CPCD
::
Process
::
Process
(
const
Properties
&
props
,
class
CPCD
*
cpcd
)
{
...
...
@@ -64,6 +66,7 @@ CPCD::Process::Process(const Properties & props, class CPCD *cpcd) {
props
.
get
(
"stdout"
,
m_stdout
);
props
.
get
(
"stderr"
,
m_stderr
);
props
.
get
(
"ulimit"
,
m_ulimit
);
props
.
get
(
"shutdown"
,
m_shutdown_options
);
m_status
=
STOPPED
;
if
(
strcasecmp
(
m_type
.
c_str
(),
"temporary"
)
==
0
){
...
...
@@ -454,7 +457,11 @@ CPCD::Process::stop() {
m_status
=
STOPPING
;
errno
=
0
;
int
ret
=
kill
(
-
m_pid
,
SIGTERM
);
int
signo
=
SIGTERM
;
if
(
m_shutdown_options
==
"SIGKILL"
)
signo
=
SIGKILL
;
int
ret
=
kill
(
-
m_pid
,
signo
);
switch
(
ret
)
{
case
0
:
logger
.
debug
(
"Sent SIGTERM to pid %d"
,
(
int
)
-
m_pid
);
...
...
ndb/src/kernel/blocks/dbtc/Dbtc.hpp
View file @
c1f04bf0
...
...
@@ -585,34 +585,8 @@ public:
*/
ArrayPool
<
TcIndexOperation
>
c_theIndexOperationPool
;
/**
* The list of index operations
*/
ArrayList
<
TcIndexOperation
>
c_theIndexOperations
;
UintR
c_maxNumberOfIndexOperations
;
struct
TcSeizedIndexOperation
{
/**
* Next ptr (used in pool/list)
*/
union
{
Uint32
nextPool
;
Uint32
nextList
;
};
/**
* Prev pointer (used in list)
*/
Uint32
prevList
;
};
/**
* Pool of seized index operations
*/
ArrayPool
<
TcSeizedIndexOperation
>
c_theSeizedIndexOperationPool
;
typedef
Ptr
<
TcSeizedIndexOperation
>
TcSeizedIndexOperationPtr
;
/************************** API CONNECT RECORD ***********************
* The API connect record contains the connection record to which the
* application connects.
...
...
@@ -650,7 +624,7 @@ public:
struct
ApiConnectRecord
{
ApiConnectRecord
(
ArrayPool
<
TcFiredTriggerData
>
&
firedTriggerPool
,
ArrayPool
<
Tc
Seized
IndexOperation
>
&
seizedIndexOpPool
)
:
ArrayPool
<
TcIndexOperation
>
&
seizedIndexOpPool
)
:
theFiredTriggers
(
firedTriggerPool
),
isIndexOp
(
false
),
theSeizedIndexOperations
(
seizedIndexOpPool
)
...
...
@@ -763,7 +737,7 @@ public:
UintR
accumulatingIndexOp
;
UintR
executingIndexOp
;
UintR
tcIndxSendArray
[
6
];
ArrayList
<
Tc
Seized
IndexOperation
>
theSeizedIndexOperations
;
ArrayList
<
TcIndexOperation
>
theSeizedIndexOperations
;
};
typedef
Ptr
<
ApiConnectRecord
>
ApiConnectRecordPtr
;
...
...
ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
View file @
c1f04bf0
...
...
@@ -65,7 +65,6 @@ void Dbtc::initData()
c_theFiredTriggerPool
.
setSize
(
c_maxNumberOfFiredTriggers
);
c_theIndexPool
.
setSize
(
c_maxNumberOfIndexes
);
c_theIndexOperationPool
.
setSize
(
c_maxNumberOfIndexOperations
);
c_theSeizedIndexOperationPool
.
setSize
(
c_maxNumberOfIndexOperations
);
c_theAttributeBufferPool
.
setSize
(
c_transactionBufferSpace
);
c_firedTriggerHash
.
setSize
((
c_maxNumberOfFiredTriggers
+
10
)
/
10
);
}
//Dbtc::initData()
...
...
@@ -85,7 +84,7 @@ void Dbtc::initRecords()
for
(
unsigned
i
=
0
;
i
<
capiConnectFilesize
;
i
++
)
{
p
=
&
apiConnectRecord
[
i
];
new
(
p
)
ApiConnectRecord
(
c_theFiredTriggerPool
,
c_the
Seized
IndexOperationPool
);
c_theIndexOperationPool
);
}
// Init all fired triggers
DLFifoList
<
TcFiredTriggerData
>
triggers
(
c_theFiredTriggerPool
);
...
...
@@ -177,7 +176,6 @@ Dbtc::Dbtc(const class Configuration & conf):
c_maxNumberOfFiredTriggers
(
0
),
c_theIndexes
(
c_theIndexPool
),
c_maxNumberOfIndexes
(
0
),
c_theIndexOperations
(
c_theIndexOperationPool
),
c_maxNumberOfIndexOperations
(
0
),
m_commitAckMarkerHash
(
m_commitAckMarkerPool
)
{
...
...
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
View file @
c1f04bf0
...
...
@@ -11161,18 +11161,18 @@ void Dbtc::execTCINDXREQ(Signal* signal)
jam
();
// This is a newly started transaction, clean-up
releaseAllSeizedIndexOperations
(
regApiPtr
);
regApiPtr
->
transid
[
0
]
=
tcIndxReq
->
transId1
;
regApiPtr
->
transid
[
1
]
=
tcIndxReq
->
transId2
;
}
//if
if
(
!
seizeIndexOperation
(
regApiPtr
,
indexOpPtr
))
{
if
(
ERROR_INSERTED
(
8036
)
||
!
seizeIndexOperation
(
regApiPtr
,
indexOpPtr
))
{
jam
();
// Failed to allocate index operation
TcIndxRef
*
const
tcIndxRef
=
(
TcIndxRef
*
)
signal
->
getDataPtrSend
();
tcIndxRef
->
connectPtr
=
tcIndxReq
->
senderData
;
tcIndxRef
->
transId
[
0
]
=
regApiPtr
->
transid
[
0
];
tcIndxRef
->
transId
[
1
]
=
regApiPtr
->
transid
[
1
];
tcIndxRef
->
errorCode
=
4000
;
sendSignal
(
regApiPtr
->
ndbapiBlockref
,
GSN_TCINDXREF
,
signal
,
TcIndxRef
::
SignalLength
,
JBB
);
terrorCode
=
288
;
regApiPtr
->
m_exec_flag
|=
TcKeyReq
::
getExecuteFlag
(
tcIndxRequestInfo
);
apiConnectptr
=
transPtr
;
abortErrorLab
(
signal
);
return
;
}
TcIndexOperation
*
indexOp
=
indexOpPtr
.
p
;
...
...
@@ -11307,15 +11307,17 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
TcIndexOperationPtr
indexOpPtr
;
TcIndexOperation
*
indexOp
;
indexOpPtr
.
i
=
regApiPtr
->
accumulatingIndexOp
;
indexOp
=
c_theIndexOperations
.
getPtr
(
indexOpPtr
.
i
);
if
(
saveINDXKEYINFO
(
signal
,
indexOp
,
src
,
keyInfoLength
))
{
jam
();
// We have received all we need
readIndexTable
(
signal
,
regApiPtr
,
indexOp
);
if
((
indexOpPtr
.
i
=
regApiPtr
->
accumulatingIndexOp
)
!=
RNIL
)
{
indexOp
=
c_theIndexOperationPool
.
getPtr
(
indexOpPtr
.
i
);
if
(
saveINDXKEYINFO
(
signal
,
indexOp
,
src
,
keyInfoLength
))
{
jam
();
// We have received all we need
readIndexTable
(
signal
,
regApiPtr
,
indexOp
);
}
}
}
...
...
@@ -11338,15 +11340,17 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
TcIndexOperationPtr
indexOpPtr
;
TcIndexOperation
*
indexOp
;
indexOpPtr
.
i
=
regApiPtr
->
accumulatingIndexOp
;
indexOp
=
c_theIndexOperations
.
getPtr
(
indexOpPtr
.
i
);
if
(
saveINDXATTRINFO
(
signal
,
indexOp
,
src
,
attrInfoLength
))
{
jam
();
// We have received all we need
readIndexTable
(
signal
,
regApiPtr
,
indexOp
);
if
((
indexOpPtr
.
i
=
regApiPtr
->
accumulatingIndexOp
)
!=
RNIL
)
{
indexOp
=
c_theIndexOperationPool
.
getPtr
(
indexOpPtr
.
i
);
if
(
saveINDXATTRINFO
(
signal
,
indexOp
,
src
,
attrInfoLength
))
{
jam
();
// We have received all we need
readIndexTable
(
signal
,
regApiPtr
,
indexOp
);
}
}
}
...
...
@@ -11371,7 +11375,7 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
releaseIndexOperation
(
apiConnectptr
.
p
,
indexOp
);
terrorCode
=
4000
;
abortErrorLab
(
signal
);
return
tru
e
;
return
fals
e
;
}
if
(
receivedAllINDXKEYINFO
(
indexOp
)
&&
receivedAllINDXATTRINFO
(
indexOp
))
{
jam
();
...
...
@@ -11404,7 +11408,7 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
releaseIndexOperation
(
apiConnectptr
.
p
,
indexOp
);
terrorCode
=
4000
;
abortErrorLab
(
signal
);
return
tru
e
;
return
fals
e
;
}
if
(
receivedAllINDXKEYINFO
(
indexOp
)
&&
receivedAllINDXATTRINFO
(
indexOp
))
{
jam
();
...
...
@@ -11464,7 +11468,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
jamEntry
();
indexOpPtr
.
i
=
tcKeyConf
->
apiConnectPtr
;
TcIndexOperation
*
indexOp
=
c_theIndexOperation
s
.
getPtr
(
indexOpPtr
.
i
);
TcIndexOperation
*
indexOp
=
c_theIndexOperation
Pool
.
getPtr
(
indexOpPtr
.
i
);
Uint32
confInfo
=
tcKeyConf
->
confInfo
;
/**
...
...
@@ -11553,7 +11557,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
jamEntry
();
indexOpPtr
.
i
=
tcKeyRef
->
connectPtr
;
TcIndexOperation
*
indexOp
=
c_theIndexOperation
s
.
getPtr
(
indexOpPtr
.
i
);
TcIndexOperation
*
indexOp
=
c_theIndexOperation
Pool
.
getPtr
(
indexOpPtr
.
i
);
indexOpPtr
.
p
=
indexOp
;
if
(
!
indexOp
)
{
jam
();
...
...
@@ -11654,7 +11658,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
jamEntry
();
TcIndexOperationPtr
indexOpPtr
;
indexOpPtr
.
i
=
transIdAI
->
connectPtr
;
TcIndexOperation
*
indexOp
=
c_theIndexOperation
s
.
getPtr
(
indexOpPtr
.
i
);
TcIndexOperation
*
indexOp
=
c_theIndexOperation
Pool
.
getPtr
(
indexOpPtr
.
i
);
indexOpPtr
.
p
=
indexOp
;
if
(
!
indexOp
)
{
jam
();
...
...
@@ -11762,7 +11766,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
jamEntry
();
TcIndexOperationPtr
indexOpPtr
;
indexOpPtr
.
i
=
tcRollbackRep
->
connectPtr
;
TcIndexOperation
*
indexOp
=
c_theIndexOperation
s
.
getPtr
(
indexOpPtr
.
i
);
TcIndexOperation
*
indexOp
=
c_theIndexOperation
Pool
.
getPtr
(
indexOpPtr
.
i
);
indexOpPtr
.
p
=
indexOp
;
tcRollbackRep
=
(
TcRollbackRep
*
)
signal
->
getDataPtrSend
();
tcRollbackRep
->
connectPtr
=
indexOp
->
tcIndxReq
.
senderData
;
...
...
@@ -12090,16 +12094,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
bool
Dbtc
::
seizeIndexOperation
(
ApiConnectRecord
*
regApiPtr
,
TcIndexOperationPtr
&
indexOpPtr
)
{
bool
seizeOk
;
seizeOk
=
c_theIndexOperations
.
seize
(
indexOpPtr
);
if
(
seizeOk
)
{
jam
();
TcSeizedIndexOperationPtr
seizedIndexOpPtr
;
seizeOk
&=
regApiPtr
->
theSeizedIndexOperations
.
seizeId
(
seizedIndexOpPtr
,
indexOpPtr
.
i
);
}
return
seizeOk
;
return
regApiPtr
->
theSeizedIndexOperations
.
seize
(
indexOpPtr
);
}
void
Dbtc
::
releaseIndexOperation
(
ApiConnectRecord
*
regApiPtr
,
...
...
@@ -12113,18 +12108,16 @@ void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
indexOp
->
expectedTransIdAI
=
0
;
indexOp
->
transIdAI
.
release
();
regApiPtr
->
theSeizedIndexOperations
.
release
(
indexOp
->
indexOpId
);
c_theIndexOperations
.
release
(
indexOp
->
indexOpId
);
}
void
Dbtc
::
releaseAllSeizedIndexOperations
(
ApiConnectRecord
*
regApiPtr
)
{
Tc
Seized
IndexOperationPtr
seizedIndexOpPtr
;
TcIndexOperationPtr
seizedIndexOpPtr
;
regApiPtr
->
theSeizedIndexOperations
.
first
(
seizedIndexOpPtr
);
while
(
seizedIndexOpPtr
.
i
!=
RNIL
)
{
jam
();
TcIndexOperation
*
indexOp
=
c_theIndexOperations
.
getPtr
(
seizedIndexOpPtr
.
i
);
TcIndexOperation
*
indexOp
=
seizedIndexOpPtr
.
p
;
indexOp
->
indexOpState
=
IOS_NOOP
;
indexOp
->
expectedKeyInfo
=
0
;
...
...
@@ -12133,7 +12126,6 @@ void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
indexOp
->
attrInfo
.
release
();
indexOp
->
expectedTransIdAI
=
0
;
indexOp
->
transIdAI
.
release
();
c_theIndexOperations
.
release
(
seizedIndexOpPtr
.
i
);
regApiPtr
->
theSeizedIndexOperations
.
next
(
seizedIndexOpPtr
);
}
regApiPtr
->
theSeizedIndexOperations
.
release
();
...
...
ndb/src/kernel/error/ErrorReporter.cpp
View file @
c1f04bf0
...
...
@@ -130,7 +130,7 @@ ErrorReporter::formatMessage(ErrorCategory type,
"Date/Time: %s
\n
Type of error: %s
\n
"
"Message: %s
\n
Fault ID: %d
\n
Problem data: %s"
"
\n
Object of reference: %s
\n
ProgramName: %s
\n
"
"ProcessID: %d
\n
TraceFile: %s
\n
***EOM***
\n
"
,
"ProcessID: %d
\n
TraceFile: %s
\n
%s
\n
***EOM***
\n
"
,
formatTimeStampString
()
,
errorType
[
type
],
lookupErrorMessage
(
faultID
),
...
...
@@ -139,7 +139,8 @@ ErrorReporter::formatMessage(ErrorCategory type,
objRef
,
my_progname
,
processId
,
theNameOfTheTraceFile
?
theNameOfTheTraceFile
:
"<no tracefile>"
);
theNameOfTheTraceFile
?
theNameOfTheTraceFile
:
"<no tracefile>"
,
NDB_VERSION_STRING
);
// Add trailing blanks to get a fixed lenght of the message
while
(
strlen
(
messptr
)
<=
MESSAGE_LENGTH
-
3
){
...
...
ndb/src/mgmapi/mgmapi.cpp
View file @
c1f04bf0
...
...
@@ -857,7 +857,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
args
.
put
(
"initialstart"
,
initial
);
args
.
put
(
"nostart"
,
nostart
);
const
Properties
*
reply
;
const
int
timeout
=
handle
->
read_timeout
;
handle
->
read_timeout
=
5
*
60
*
1000
;
// 5 minutes
reply
=
ndb_mgm_call
(
handle
,
restart_reply
,
"restart all"
,
&
args
);
handle
->
read_timeout
=
timeout
;
CHECK_REPLY
(
reply
,
-
1
);
BaseString
result
;
...
...
@@ -890,7 +893,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
args
.
put
(
"nostart"
,
nostart
);
const
Properties
*
reply
;
const
int
timeout
=
handle
->
read_timeout
;
handle
->
read_timeout
=
5
*
60
*
1000
;
// 5 minutes
reply
=
ndb_mgm_call
(
handle
,
restart_reply
,
"restart node"
,
&
args
);
handle
->
read_timeout
=
timeout
;
if
(
reply
!=
NULL
)
{
BaseString
result
;
reply
->
get
(
"result"
,
result
);
...
...
ndb/src/ndbapi/ClusterMgr.cpp
View file @
c1f04bf0
...
...
@@ -66,6 +66,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
{
ndbSetOwnVersion
();
clusterMgrThreadMutex
=
NdbMutex_Create
();
noOfAliveNodes
=
0
;
noOfConnectedNodes
=
0
;
theClusterMgrThread
=
0
;
}
...
...
@@ -335,9 +336,9 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
node
.
m_state
=
apiRegConf
->
nodeState
;
if
(
node
.
compatible
&&
(
node
.
m_state
.
startLevel
==
NodeState
::
SL_STARTED
||
node
.
m_state
.
startLevel
==
NodeState
::
SL_SINGLEUSER
)){
node
.
m_alive
=
true
;
set_node_alive
(
node
,
true
)
;
}
else
{
node
.
m_alive
=
false
;
set_node_alive
(
node
,
false
)
;
}
//if
node
.
hbSent
=
0
;
node
.
hbCounter
=
0
;
...
...
@@ -360,7 +361,7 @@ ClusterMgr::execAPI_REGREF(const Uint32 * theData){
assert
(
node
.
defined
==
true
);
node
.
compatible
=
false
;
node
.
m_alive
=
false
;
set_node_alive
(
node
,
false
)
;
node
.
m_state
=
NodeState
::
SL_NOTHING
;
node
.
m_info
.
m_version
=
ref
->
version
;
...
...
@@ -437,7 +438,7 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
Node
&
theNode
=
theNodes
[
nodeId
];
theNode
.
m_alive
=
false
;
set_node_alive
(
theNode
,
false
)
;
if
(
theNode
.
connected
)
theFacade
.
doDisconnect
(
nodeId
);
...
...
@@ -449,8 +450,8 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
}
theNode
.
nfCompleteRep
=
false
;
if
(
noOf
Connected
Nodes
==
0
){
if
(
noOf
Alive
Nodes
==
0
){
NFCompleteRep
rep
;
for
(
Uint32
i
=
1
;
i
<
MAX_NODES
;
i
++
){
if
(
theNodes
[
i
].
defined
&&
theNodes
[
i
].
nfCompleteRep
==
false
){
...
...
ndb/src/ndbapi/ClusterMgr.hpp
View file @
c1f04bf0
...
...
@@ -80,6 +80,7 @@ public:
Uint32
getNoOfConnectedNodes
()
const
;
private:
Uint32
noOfAliveNodes
;
Uint32
noOfConnectedNodes
;
Node
theNodes
[
MAX_NODES
];
NdbThread
*
theClusterMgrThread
;
...
...
@@ -100,6 +101,19 @@ private:
void
execAPI_REGREF
(
const
Uint32
*
theData
);
void
execNODE_FAILREP
(
const
Uint32
*
theData
);
void
execNF_COMPLETEREP
(
const
Uint32
*
theData
);
inline
void
set_node_alive
(
Node
&
node
,
bool
alive
){
if
(
node
.
m_alive
&&
!
alive
)
{
assert
(
noOfAliveNodes
);
noOfAliveNodes
--
;
}
else
if
(
!
node
.
m_alive
&&
alive
)
{
noOfAliveNodes
++
;
}
node
.
m_alive
=
alive
;
}
};
inline
...
...
ndb/src/ndbapi/ndberror.c
View file @
c1f04bf0
...
...
@@ -169,7 +169,7 @@ ErrorBundle ErrorCodes[] = {
{
4021
,
TR
,
"Out of Send Buffer space in NDB API"
},
{
4022
,
TR
,
"Out of Send Buffer space in NDB API"
},
{
4032
,
TR
,
"Out of Send Buffer space in NDB API"
},
{
288
,
TR
,
"Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)"
},
/**
* InsufficientSpace
*/
...
...
ndb/test/include/CpcClient.hpp
View file @
c1f04bf0
...
...
@@ -56,6 +56,7 @@ public:
BaseString
m_stdout
;
BaseString
m_stderr
;
BaseString
m_ulimit
;
BaseString
m_shutdown_options
;
};
private:
...
...
ndb/test/ndbapi/testNodeRestart.cpp
View file @
c1f04bf0
...
...
@@ -359,7 +359,7 @@ int runLateCommit(NDBT_Context* ctx, NDBT_Step* step){
if
(
hugoOps
.
startTransaction
(
pNdb
)
!=
0
)
return
NDBT_FAILED
;
if
(
hugoOps
.
pkUpdateRecord
(
pNdb
,
1
)
!=
0
)
if
(
hugoOps
.
pkUpdateRecord
(
pNdb
,
1
,
128
)
!=
0
)
return
NDBT_FAILED
;
if
(
hugoOps
.
execute_NoCommit
(
pNdb
)
!=
0
)
...
...
ndb/test/run-test/Makefile.am
View file @
c1f04bf0
...
...
@@ -6,7 +6,12 @@ include $(top_srcdir)/ndb/config/type_util.mk.am
include
$(top_srcdir)/ndb/config/type_mgmapiclient.mk.am
test_PROGRAMS
=
atrt
test_DATA
=
daily-basic-tests.txt daily-devel-tests.txt
test_DATA
=
daily-basic-tests.txt daily-devel-tests.txt
\
conf-daily-basic-ndbmaster.txt
\
conf-daily-basic-shark.txt
\
conf-daily-devel-ndbmaster.txt
\
conf-daily-sql-ndbmaster.txt
\
conf-daily-basic-dl145a.txt
test_SCRIPTS
=
atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh
\
atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
...
...
ndb/test/run-test/conf-daily-basic-dl145a.txt
0 → 100644
View file @
c1f04bf0
baseport: 14000
basedir: /home/ndbdev/autotest/run
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /home/ndbdev/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
ndb/test/run-test/conf-daily-basic-ndbmaster.txt
0 → 100644
View file @
c1f04bf0
baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
ndb/test/run-test/conf-daily-basic-shark.txt
0 → 100644
View file @
c1f04bf0
baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host1 CHOOSE_host1
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
ndb/test/run-test/conf-daily-devel-ndbmaster.txt
0 → 100644
View file @
c1f04bf0
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
ndb/test/run-test/conf-daily-sql-ndbmaster.txt
0 → 100644
View file @
c1f04bf0
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
mysqld: CHOOSE_host1 CHOOSE_host4
mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
ndb/test/run-test/main.cpp
View file @
c1f04bf0
...
...
@@ -116,10 +116,7 @@ main(int argc, const char ** argv){
*/
if
(
restart
){
g_logger
.
info
(
"(Re)starting ndb processes"
);
if
(
!
stop_processes
(
g_config
,
atrt_process
::
NDB_MGM
))
goto
end
;
if
(
!
stop_processes
(
g_config
,
atrt_process
::
NDB_DB
))
if
(
!
stop_processes
(
g_config
,
~
0
))
goto
end
;
if
(
!
start_processes
(
g_config
,
atrt_process
::
NDB_MGM
))
...
...
@@ -142,6 +139,9 @@ main(int argc, const char ** argv){
goto
end
;
started:
if
(
!
start_processes
(
g_config
,
p_servers
))
goto
end
;
g_logger
.
info
(
"Ndb start completed"
);
}
...
...
@@ -158,9 +158,6 @@ main(int argc, const char ** argv){
if
(
!
setup_test_case
(
g_config
,
test_case
))
goto
end
;
if
(
!
start_processes
(
g_config
,
p_servers
))
goto
end
;
if
(
!
start_processes
(
g_config
,
p_clients
))
goto
end
;
...
...
@@ -201,9 +198,6 @@ main(int argc, const char ** argv){
if
(
!
stop_processes
(
g_config
,
p_clients
))
goto
end
;
if
(
!
stop_processes
(
g_config
,
p_servers
))
goto
end
;
if
(
!
gather_result
(
g_config
,
&
result
))
goto
end
;
...
...
@@ -454,6 +448,7 @@ setup_config(atrt_config& config){
proc
.
m_proc
.
m_runas
=
proc
.
m_host
->
m_user
;
proc
.
m_proc
.
m_ulimit
=
"c:unlimited"
;
proc
.
m_proc
.
m_env
.
assfmt
(
"MYSQL_BASE_DIR=%s"
,
dir
.
c_str
());
proc
.
m_proc
.
m_shutdown_options
=
""
;
proc
.
m_hostname
=
proc
.
m_host
->
m_hostname
;
proc
.
m_ndb_mgm_port
=
g_default_base_port
;
if
(
split1
[
0
]
==
"mgm"
){
...
...
@@ -476,21 +471,19 @@ setup_config(atrt_config& config){
proc
.
m_proc
.
m_path
.
assign
(
dir
).
append
(
"/libexec/mysqld"
);
proc
.
m_proc
.
m_args
=
"--core-file --ndbcluster"
;
proc
.
m_proc
.
m_cwd
.
appfmt
(
"%d.mysqld"
,
index
);
if
(
mysql_port_offset
>
0
||
g_mysqld_use_base
){
// setup mysql specific stuff
const
char
*
basedir
=
proc
.
m_proc
.
m_cwd
.
c_str
();
proc
.
m_proc
.
m_args
.
appfmt
(
"--datadir=%s"
,
basedir
);
proc
.
m_proc
.
m_args
.
appfmt
(
"--pid-file=%s/mysql.pid"
,
basedir
);
proc
.
m_proc
.
m_args
.
appfmt
(
"--socket=%s/mysql.sock"
,
basedir
);
proc
.
m_proc
.
m_args
.
appfmt
(
"--port=%d"
,
g_default_base_port
-
(
++
mysql_port_offset
));
}
proc
.
m_proc
.
m_shutdown_options
=
"SIGKILL"
;
// not nice
}
else
if
(
split1
[
0
]
==
"api"
){
proc
.
m_type
=
atrt_process
::
NDB_API
;
proc
.
m_proc
.
m_name
.
assfmt
(
"%d-%s"
,
index
,
"ndb_api"
);
proc
.
m_proc
.
m_path
=
""
;
proc
.
m_proc
.
m_args
=
""
;
proc
.
m_proc
.
m_cwd
.
appfmt
(
"%d.ndb_api"
,
index
);
}
else
if
(
split1
[
0
]
==
"mysql"
){
proc
.
m_type
=
atrt_process
::
MYSQL_CLIENT
;
proc
.
m_proc
.
m_name
.
assfmt
(
"%d-%s"
,
index
,
"mysql"
);
proc
.
m_proc
.
m_path
=
""
;
proc
.
m_proc
.
m_args
=
""
;
proc
.
m_proc
.
m_cwd
.
appfmt
(
"%d.mysql"
,
index
);
}
else
{
g_logger
.
critical
(
"%s:%d: Unhandled process type: %s"
,
g_process_config_filename
,
lineno
,
...
...
@@ -913,6 +906,11 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){
tc
.
m_report
=
true
;
else
tc
.
m_report
=
false
;
if
(
p
.
get
(
"run-all"
,
&
mt
)
&&
strcmp
(
mt
,
"yes"
)
==
0
)
tc
.
m_run_all
=
true
;
else
tc
.
m_run_all
=
false
;
return
true
;
}
...
...
@@ -928,16 +926,17 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
size_t
i
=
0
;
for
(;
i
<
config
.
m_processes
.
size
();
i
++
){
atrt_process
&
proc
=
config
.
m_processes
[
i
];
if
(
proc
.
m_type
==
atrt_process
::
NDB_API
){
if
(
proc
.
m_type
==
atrt_process
::
NDB_API
||
proc
.
m_type
==
atrt_process
::
MYSQL_CLIENT
){
proc
.
m_proc
.
m_path
.
assfmt
(
"%s/bin/%s"
,
proc
.
m_host
->
m_base_dir
.
c_str
(),
tc
.
m_command
.
c_str
());
proc
.
m_proc
.
m_args
.
assign
(
tc
.
m_args
);
break
;
if
(
!
tc
.
m_run_all
)
break
;
}
}
for
(
i
++
;
i
<
config
.
m_processes
.
size
();
i
++
){
atrt_process
&
proc
=
config
.
m_processes
[
i
];
if
(
proc
.
m_type
==
atrt_process
::
NDB_API
){
if
(
proc
.
m_type
==
atrt_process
::
NDB_API
||
proc
.
m_type
==
atrt_process
::
MYSQL_CLIENT
){
proc
.
m_proc
.
m_path
.
assign
(
""
);
proc
.
m_proc
.
m_args
.
assign
(
""
);
}
...
...
ndb/test/run-test/make-config.sh
View file @
c1f04bf0
#!/bin/sh
# NAME
# make-config.sh - Makes a config file for mgm server
#
# SYNOPSIS
# make-config.sh [ -t <template> ] [-s] [ -m <machine conf> [ -d <directory> ]
#
# DESCRIPTION
#
# OPTIONS
#
# EXAMPLES
#
#
# ENVIRONMENT
# NDB_PROJ_HOME Home dir for ndb
#
# FILES
# $NDB_PROJ_HOME/lib/funcs.sh general shell script functions
#
#
# SEE ALSO
#
# DIAGNOSTICTS
#
# VERSION
# 1.0
# 1.1 021112 epesson: Adapted for new mgmt server in NDB 2.00
#
# AUTHOR
# Jonas Oreland
#
# CHANGES
# also generate ndbnet config
#
progname
=
`
basename
$0
`
synopsis
=
"make-config.sh [ -t template ] [ -m <machine conf> ] [ -d <dst directory> ][-s] [<mgm host>]"
baseport
=
""
basedir
=
""
proc_no
=
1
node_id
=
1
#: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
d_file
=
/tmp/d.
$$
dir_file
=
/tmp/dirs.
$$
config_file
=
/tmp/config.
$$
cluster_file
=
/tmp/cluster.
$$
#: ${NDB_LOCAL_BUILD_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
# You may have to experiment a bit
# to get quoting right (if you need it).
#. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff
trace
()
{
echo
$*
1>&2
}
syndie
()
{
trace
$*
exit
1
}
# defaults for options related variables
#
mgm_nodes
=
0
ndb_nodes
=
0
api_nodes
=
0
uniq_id
=
$$
.
$$
own_host
=
`
hostname
`
dst_dir
=
""
template
=
/dev/null
machines
=
/dev/null
verbose
=
yes
# used if error when parsing the options environment variable
#
env_opterr
=
"options environment variable: <<
$options
>>"
# Option parsing, for the options variable as well as the command line.
#
# We want to be able to set options in an environment variable,
# as well as on the command line. In order not to have to repeat
# the same getopts information twice, we loop two times over the
# getopts while loop. The first time, we process options from
# the options environment variable, the second time we process
# options from the command line.
#
# The things to change are the actual options and what they do.
#
add_node
(){
no
=
$1
;
shift
add_procs
(){
type
=
$1
;
shift
echo
$*
|
awk
'BEGIN{FS=":";}{h=$1; if(h=="localhost") h="'
$own_host
'";
printf("%s_%d_host=%s\n", "'
$type
'", "'
$no
'", h);
if(NF>1 && $2!="") printf("%s_%d_port=%d\n",
"'
$type
'", "'
$no
'", $2);
if(NF>2 && $3!="") printf("%s_%d_dir=%s\n",
"'
$type
'", "'
$no
'", $3);
}'
}
add_mgm_node
(){
mgm_nodes
=
`
cat
/tmp/mgm_nodes.
$uniq_id
|
grep
"_host="
|
wc
-l
`
mgm_nodes
=
`
expr
$mgm_nodes
+ 1
`
while
[
$#
-gt
0
]
do
add_node
${
mgm_nodes
}
mgm_node
$1
>>
/tmp/mgm_nodes.
$uniq_id
shift
mgm_nodes
=
`
expr
$mgm_nodes
+ 1
`
done
}
add_ndb_node
(){
ndb_nodes
=
`
cat
/tmp/ndb_nodes.
$uniq_id
|
grep
"_host="
|
wc
-l
`
ndb_nodes
=
`
expr
$ndb_nodes
+ 1
`
while
[
$#
-gt
0
]
do
add_node
${
ndb_nodes
}
ndb_node
$1
>>
/tmp/ndb_nodes.
$uniq_id
shift
ndb_nodes
=
`
expr
$ndb_nodes
+ 1
`
done
}
add_api_node
(){
api_nodes
=
`
cat
/tmp/api_nodes.
$uniq_id
|
grep
"_host="
|wc
-l
`
api_nodes
=
`
expr
$api_nodes
+ 1
`
while
[
$#
-gt
0
]
do
add_node
${
api_nodes
}
api_node
$1
>>
/tmp/api_nodes.
$uniq_id
shift
api_nodes
=
`
expr
$api_nodes
+ 1
`
done
while
[
$#
-ne
0
]
do
add_proc
$type
$1
shift
done
}
rm
-rf
/tmp/mgm_nodes.
$uniq_id
;
touch
/tmp/mgm_nodes.
$uniq_id
rm
-rf
/tmp/ndb_nodes.
$uniq_id
;
touch
/tmp/ndb_nodes.
$uniq_id
rm
-rf
/tmp/api_nodes.
$uniq_id
;
touch
/tmp/api_nodes.
$uniq_id
for
optstring
in
"
$options
"
""
# 1. options variable 2. cmd line
do
while
getopts
d:m:t:n:o:a:b:p:s i
$optstring
# optstring empty => no arg => cmd line
do
case
$i
in
q
)
verbose
=
""
;;
# echo important things
t
)
template
=
$OPTARG
;;
# Template
d
)
dst_dir
=
$OPTARG
;;
# Destination directory
m
)
machines
=
$OPTARG
;;
# Machine configuration
s
)
mgm_start
=
yes
;;
# Make mgm start script
\?
)
syndie
$env_opterr
;;
# print synopsis and exit
add_proc
(){
dir
=
""
conf
=
""
case
$type
in
mgm
)
dir
=
"ndb_mgmd"
conf
=
"[ndb_mgmd]
\n
Id:
$node_id
\n
HostName:
$2
\n
"
node_id
=
`
expr
$node_id
+ 1
`
;;
api
)
dir
=
"ndb_api"
conf
=
"[api]
\n
Id:
$node_id
\n
HostName:
$2
\n
"
node_id
=
`
expr
$node_id
+ 1
`
;;
ndb
)
dir
=
"ndbd"
conf
=
"[ndbd]
\n
Id:
$node_id
\n
HostName:
$2
\n
"
node_id
=
`
expr
$node_id
+ 1
`
;;
mysqld
)
dir
=
"mysqld"
conf
=
"[mysqld]
\n
Id:
$node_id
\n
HostName:
$2
\n
"
node_id
=
`
expr
$node_id
+ 1
`
;;
mysql
)
dir
=
"mysql"
;;
esac
done
[
-n
"
$optstring
"
]
&&
OPTIND
=
1
# Reset for round 2, cmdline options
env_opterr
=
# Round 2 should not use the value
done
shift
`
expr
$OPTIND
- 1
`
if
[
-z
"
$dst_dir
"
]
then
verbose
=
fi
# skip <n> <word>...
# Drop the first <n> of the remaining words and echo the rest,
# e.g. `skip 1 ndb: h1 h2` prints "h1 h2" — used to strip the
# "type:" prefix off a machine-file line.
skip (){
	no=$1; shift
	shift $no
	echo $*
}
# --- option parsing done ---
grep
"^ndb: "
$machines
|
while
read
node
do
node
=
`
skip 1
$node
`
add_ndb_node
$node
done
grep
"^api: "
$machines
|
while
read
node
do
node
=
`
skip 1
$node
`
add_api_node
$node
done
grep
"^mgm: "
$machines
|
while
read
node
cnf
=
/dev/null
cat
$1
|
while
read
line
do
node
=
`
skip 1
$node
`
add_mgm_node
$node
case
$line
in
baseport:
*
)
baseport
=
`
echo
$line
|
sed
's/baseport[ ]*:[ ]*//g'
`
;;
basedir:
*
)
basedir
=
`
echo
$line
|
sed
's/basedir[ ]*:[ ]*//g'
`
;;
mgm:
*
)
add_procs mgm
`
echo
$line
|
sed
's/mgm[ ]*:[ ]*//g'
`
;;
api:
*
)
add_procs api
`
echo
$line
|
sed
's/api[ ]*:[ ]*//g'
`
;;
ndb:
*
)
add_procs ndb
`
echo
$line
|
sed
's/ndb[ ]*:[ ]*//g'
`
;;
mysqld:
*
)
add_procs mysqld
`
echo
$line
|
sed
's/mysqld[ ]*:[ ]*//g'
`
;;
mysql:
*
)
add_procs mysql
`
echo
$line
|
sed
's/mysql[ ]*:[ ]*//g'
`
;;
"-- cluster config"
)
if
[
"
$cnf
"
=
"/dev/null"
]
then
cnf
=
$cluster_file
else
cnf
=
/dev/null
fi
line
=
""
;;
*
)
echo
$line
>>
$cnf
;
line
=
""
;;
esac
if
[
"
$line
"
]
then
echo
$line
>>
$d_file
fi
done
tmp
=
`
grep
"^baseport: "
$machines
|
tail
-1
|
cut
-d
":"
-f
2
`
if
[
"
$tmp
"
]
then
baseport
=
`
echo
$tmp
`
else
syndie
"Unable to find baseport"
fi
cat
$dir_file
| xargs
mkdir
-p
# trim <words>...
# Echo the arguments with surrounding whitespace stripped and internal
# runs of whitespace collapsed to single spaces (the shell's word
# splitting on the unquoted $* does the work).
trim (){
	echo $*
}
tmp
=
`
grep
"^basedir: "
$machines
|
tail
-1
|
cut
-d
":"
-f
2
`
if
[
"
$tmp
"
]
then
basedir
=
`
trim
$tmp
`
fi
# -- Load environment --
ndb_nodes
=
`
cat
/tmp/ndb_nodes.
$uniq_id
|
grep
"_host="
|
wc
-l
`
api_nodes
=
`
cat
/tmp/api_nodes.
$uniq_id
|
grep
"_host="
|
wc
-l
`
mgm_nodes
=
`
cat
/tmp/mgm_nodes.
$uniq_id
|
grep
"_host="
|
wc
-l
`
.
/tmp/ndb_nodes.
$uniq_id
.
/tmp/api_nodes.
$uniq_id
.
/tmp/mgm_nodes.
$uniq_id
rm
-f
/tmp/ndb_nodes.
$uniq_id
/tmp/api_nodes.
$uniq_id
/tmp/mgm_nodes.
$uniq_id
# -- Verify
trace
"Verifying arguments"
if
[
!
-r
$template
]
then
syndie
"Unable to read template file:
$template
"
fi
if
[
$ndb_nodes
-le
0
]
then
syndie
"No ndb nodes specified"
fi
if
[
$api_nodes
-le
0
]
then
syndie
"No api nodes specified"
fi
if
[
$mgm_nodes
-gt
1
]
then
syndie
"More than one mgm node specified"
fi
if
[
$mgm_nodes
-eq
0
]
then
trace
"No managment server specified using
`
hostname
`
"
mgm_nodes
=
1
mgm_node_1
=
`
hostname
`
fi
if
[
-n
"
$dst_dir
"
]
then
mkdir
-p
$dst_dir
if
[
!
-d
$dst_dir
]
if
[
-f
$cluster_file
]
then
syndie
"Unable to create dst dir:
$dst_dir
"
fi
DST
=
/tmp/
$uniq_id
cat
$cluster_file
$config_file
>>
/tmp/config2.
$$
mv
/tmp/config2.
$$
$config_file
fi
# --- option verifying done ---
# Find uniq computers
i
=
1
while
[
$i
-le
$mgm_nodes
]
do
echo
`
eval echo
"
\$
"
mgm_node_
${
i
}
_host
`
>>
/tmp/hosts.
$uniq_id
i
=
`
expr
$i
+ 1
`
done
i
=
1
while
[
$i
-le
$ndb_nodes
]
do
echo
`
eval echo
"
\$
"
ndb_node_
${
i
}
_host
`
>>
/tmp/hosts.
$uniq_id
i
=
`
expr
$i
+ 1
`
done
i
=
1
while
[
$i
-le
$api_nodes
]
do
echo
`
eval echo
"
\$
"
api_node_
${
i
}
_host
`
>>
/tmp/hosts.
$uniq_id
i
=
`
expr
$i
+ 1
`
for
i
in
`
find
.
-type
d
-name
'*.ndb_mgmd'
`
do
cp
$config_file
$i
/config.ini
done
sort
-u
-o
/tmp/hosts.
$uniq_id
/tmp/hosts.
$uniq_id
# get_computer_id <hostname>
# Print the 1-based line number of <hostname> in the sorted unique host
# list /tmp/hosts.$uniq_id; that line number doubles as the host's
# [COMPUTER] Id in the generated config. -w avoids substring matches.
get_computer_id (){
	grep -w -n $1 /tmp/hosts.$uniq_id | cut -d ":" -f 1
}
# get_mgm_computer_id <n>
# Resolve management node <n> (variable mgm_node_<n>_host) to its
# computer id in /tmp/hosts.$uniq_id.
get_mgm_computer_id (){
	eval a="\$mgm_node_${1}_host"
	get_computer_id $a
}
# get_ndb_computer_id <n>
# Resolve ndb node <n> (variable ndb_node_<n>_host) to its
# computer id in /tmp/hosts.$uniq_id.
get_ndb_computer_id (){
	eval a="\$ndb_node_${1}_host"
	get_computer_id $a
}
# get_api_computer_id <n>
# Resolve api node <n> (variable api_node_<n>_host) to its
# computer id in /tmp/hosts.$uniq_id.
get_api_computer_id (){
	eval a="\$api_node_${1}_host"
	get_computer_id $a
}
# -- Write config files --
mgm_port
=
$baseport
(
i
=
1
#echo "COMPUTERS"
cat
/tmp/hosts.
$uniq_id
|
while
read
host
do
echo
"[COMPUTER]"
echo
"Id:
$i
"
echo
"ByteOrder: Big"
echo
"HostName:
$host
"
echo
i
=
`
expr
$i
+ 1
`
done
node_id
=
1
echo
# Mgm process
echo
echo
"[MGM]"
echo
"Id:
$node_id
"
echo
"ExecuteOnComputer:
`
get_mgm_computer_id 1
`
"
echo
"PortNumber:
$mgm_port
"
node_id
=
`
expr
$node_id
+ 1
`
# Ndb processes
i
=
1
ndb_nodes
=
`
trim
$ndb_nodes
`
while
[
$i
-le
$ndb_nodes
]
do
echo
echo
"[DB]"
echo
"Id:
$node_id
"
echo
"ExecuteOnComputer:
`
get_ndb_computer_id
$i
`
"
echo
"FileSystemPath:
$basedir
/run/node-
${
node_id
}
-fs"
i
=
`
expr
$i
+ 1
`
node_id
=
`
expr
$node_id
+ 1
`
done
# API processes
i
=
1
while
[
$i
-le
$api_nodes
]
do
echo
echo
"[API]"
echo
"Id:
$node_id
"
echo
"ExecuteOnComputer:
`
get_api_computer_id
$i
`
"
i
=
`
expr
$i
+ 1
`
node_id
=
`
expr
$node_id
+ 1
`
done
# Connections
current_port
=
`
expr
$mgm_port
+ 1
`
echo
# Connect Mgm with all ndb-nodes
i
=
1
while
[
$i
-le
$ndb_nodes
]
do
echo
echo
"[TCP]"
echo
"NodeId1: 1"
echo
"NodeId2:
`
expr
$i
+ 1
`
"
echo
"PortNumber:
$current_port
"
i
=
`
expr
$i
+ 1
`
current_port
=
`
expr
$current_port
+ 1
`
done
# Connect All ndb processes with all ndb processes
i
=
1
while
[
$i
-le
$ndb_nodes
]
do
j
=
`
expr
$i
+ 1
`
while
[
$j
-le
$ndb_nodes
]
do
echo
echo
"[TCP]"
echo
"NodeId1:
`
expr
$i
+ 1
`
"
echo
"NodeId2:
`
expr
$j
+ 1
`
"
echo
"PortNumber:
$current_port
"
j
=
`
expr
$j
+ 1
`
current_port
=
`
expr
$current_port
+ 1
`
done
i
=
`
expr
$i
+ 1
`
done
# Connect all ndb-nodes with all api nodes
i
=
1
while
[
$i
-le
$ndb_nodes
]
do
j
=
1
while
[
$j
-le
$api_nodes
]
do
echo
echo
"[TCP]"
echo
"NodeId1:
`
expr
$i
+ 1
`
"
echo
"NodeId2:
`
expr
$j
+
$ndb_nodes
+ 1
`
"
echo
"PortNumber:
$current_port
"
j
=
`
expr
$j
+ 1
`
current_port
=
`
expr
$current_port
+ 1
`
done
i
=
`
expr
$i
+ 1
`
done
echo
)
>
$DST
trace
"Init config file done"
if
[
-z
"
$dst_dir
"
]
then
cat
$DST
rm
-f
$DST
rm
-f
/tmp/hosts.
$uniq_id
exit
0
fi
###
# Create Ndb.cfg files
# nodeid=2;host=localhost:2200
# Mgm node
# mkcfg <type> <node-id>
# Create $dst_dir/<id>.ndb_<type>/ and write its Ndb.cfg containing the
# node's own process id and the management-server connect string
# (host://<mgm-host>:<mgm-port>). For "db" nodes also create the
# node-<id>-fs filesystem directory the data node will use.
mkcfg (){
	mkdir -p $dst_dir/${2}.ndb_${1}
	(
		echo "OwnProcessId $2"
		echo "host://${mgm_node_1_host}:${mgm_port}"
	) > $dst_dir/${2}.ndb_${1}/Ndb.cfg
	if [ $1 = "db" ]
	then
		# -p: don't abort if the fs dir survives from an earlier run,
		# consistent with the mkdir -p above
		mkdir -p $dst_dir/node-${2}-fs
	fi
}
mkcfg mgm 1
cat
$DST
>
$dst_dir
/1.ndb_mgm/initconfig.txt
trace
"Creating Ndb.cfg for ndb nodes"
current_node
=
2
i
=
1
while
[
$i
-le
$ndb_nodes
]
do
mkcfg db
${
current_node
}
i
=
`
expr
$i
+ 1
`
current_node
=
`
expr
$current_node
+ 1
`
done
trace
"Creating Ndb.cfg for api nodes"
i
=
1
while
[
$i
-le
$api_nodes
]
do
mkcfg api
${
current_node
}
i
=
`
expr
$i
+ 1
`
current_node
=
`
expr
$current_node
+ 1
`
done
rm
-f
$DST
rm
-f
/tmp/hosts.
$uniq_id
exit
0
# vim: set sw=4:
mv
$d_file
d.txt
rm
-f
$config_file
$dir_file
$cluster_file
ndb/test/run-test/ndb-autotest.sh
View file @
c1f04bf0
#!/bin/sh
save_args
=
$*
VERSION
=
"ndb-autotest.sh version 1.0"
VERSION
=
"ndb-autotest.sh version 1.0
4
"
DATE
=
`
date
'+%Y-%m-%d'
`
export
DATE
...
...
@@ -71,11 +71,18 @@ then
cd
$dst_place
rm
-rf
$run_dir
/
*
aclocal
;
autoheader
;
autoconf
;
automake
(
cd
innobase
;
aclocal
;
autoheader
;
autoconf
;
automake
)
(
cd
bdb/dist
;
sh s_all
)
if
[
-d
storage
]
then
(
cd
storage/innobase
;
aclocal
;
autoheader
;
autoconf
;
automake
)
(
cd
storage/bdb/dist
;
sh s_all
)
else
(
cd
innobase
;
aclocal
;
autoheader
;
autoconf
;
automake
)
(
cd
bdb/dist
;
sh s_all
)
fi
eval
$configure
--prefix
=
$run_dir
make
make
install
(
cd
$run_dir
;
./bin/mysql_install_db
)
fi
###
...
...
@@ -103,7 +110,9 @@ fi
test_dir
=
$run_dir
/mysql-test/ndb
atrt
=
$test_dir
/atrt
html
=
$test_dir
/make-html-reports.sh
PATH
=
$test_dir
:
$PATH
mkconfig
=
$run_dir
/mysql-test/ndb/make-config.sh
PATH
=
$run_dir
/bin:
$test_dir
:
$PATH
export
PATH
filter
(){
...
...
@@ -125,20 +134,16 @@ hosts=`cat /tmp/hosts.$DATE`
if
[
"
$deploy
"
]
then
(
cd
/
&&
tar
cfz /tmp/build.
$DATE
.tgz
$run_dir
)
for
i
in
$hosts
do
ok
=
0
scp /tmp/build.
$DATE
.tgz
$i
:/tmp/build.
$DATE
.
$$
.tgz
&&
\
ssh
$i
"rm -rf /space/autotest/*"
&&
\
ssh
$i
"cd / && tar xfz /tmp/build.
$DATE
.
$$
.tgz"
&&
\
ssh
$i
"rm /tmp/build.
$DATE
.
$$
.tgz"
&&
ok
=
1
if
[
$ok
-eq
0
]
then
echo
"
$i
failed during scp/ssh, excluding"
echo
$i
>>
/tmp/failed.
$DATE
fi
done
for
i
in
$hosts
do
rsync
-a
--delete
--force
--ignore-errors
$run_dir
/
$i
:
$run_dir
ok
=
$?
if
[
$ok
-ne
0
]
then
echo
"
$i
failed during rsync, excluding"
echo
$i
>>
/tmp/failed.
$DATE
fi
done
fi
rm
-f
/tmp/build.
$DATE
.tgz
...
...
@@ -170,6 +175,18 @@ choose(){
cat
$TMP1
rm
-f
$TMP1
}
# choose_conf <name>
# Print the test configuration file to use for <name>: prefer the
# host-specific conf-<name>-<shorthost>.txt, fall back to the generic
# conf-<name>.txt, and print nothing when neither exists.
choose_conf (){
	host=`hostname -s`
	if [ -f $test_dir/conf-$1-$host.txt ]
	then
		echo "$test_dir/conf-$1-$host.txt"
	elif [ -f $test_dir/conf-$1.txt ]
	then
		echo "$test_dir/conf-$1.txt"
	fi
}
start
(){
rm
-rf
report.txt result
*
log.txt
$atrt
-v
-v
-r
-R
--log-file
=
log.txt
--testcase-file
=
$test_dir
/
$2
-tests
.txt &
...
...
@@ -186,11 +203,17 @@ start(){
p2
=
`
pwd
`
cd
..
tar
cfz /tmp/res.
$$
.tgz
`
basename
$p2
`
/
$DATE
scp /tmp/res.
$$
.tgz
$result_host
:
$result_path
ssh
$result_host
"cd
$result_path
&& tar xfz res.
$$
.tgz && rm -f res.
$$
.tgz"
scp /tmp/res.
$$
.tgz
$result_host
:
$result_path
/res.
$DATE
.
`
hostname
-s
`
.
$2
.
$$
.tgz
rm
-f
/tmp/res.
$$
.tgz
}
# count_hosts <template-file>
# Count the distinct CHOOSE_host placeholders in a config template —
# i.e. how many machines a test run of it will need.
count_hosts (){
	cnt=`grep "CHOOSE_host" $1 | \
		awk '{for(i=1; i<=NF;i++) if(match($i, "CHOOSE_host") > 0) print $i;}' | \
		sort -u | wc -l`
	echo $cnt
}
p
=
`
pwd
`
for
dir
in
$RUN
do
...
...
@@ -199,10 +222,11 @@ do
run_dir
=
$base_dir
/run-
$dir
-mysql-
$clone
-
$target
res_dir
=
$base_dir
/result-
$dir
-mysql-
$clone
-
$target
/
$DATE
mkdir
-p
$res_dir
rm
-rf
$res_dir
/
*
mkdir
-p
$r
un_dir
$r
es_dir
rm
-rf
$res_dir
/
*
$run_dir
/
*
count
=
`
grep
-c
"COMPUTER"
$run_dir
/1.ndb_mgmd/initconfig.template
`
conf
=
`
choose_conf
$dir
`
count
=
`
count_hosts
$conf
`
avail_hosts
=
`
filter /tmp/filter_hosts.
$$
$hosts
`
avail
=
`
echo
$avail_hosts
|
wc
-w
`
if
[
$count
-gt
$avail
]
...
...
@@ -212,12 +236,12 @@ do
break
;
fi
run_hosts
=
`
echo
$avail_hosts
|
awk
'{for(i=1;i<='
$count
';i++)print $i;}'
`
choose
$run_dir
/d.template
$run_hosts
>
$run_dir
/d.txt
choose
$run_dir
/1.ndb_mgmd/initconfig.template
$run_hosts
>
$run_dir
/1.ndb_mgmd/config.ini
run_hosts
=
`
echo
$avail_hosts
|awk
'{for(i=1;i<='
$count
';i++)print $i;}'
`
echo
$run_hosts
>>
/tmp/filter_hosts.
$$
cd
$run_dir
choose
$conf
$run_hosts
>
d.tmp
$mkconfig
d.tmp
start
$dir
-mysql-
$clone
-
$target
$dir
$res_dir
&
done
cd
$p
...
...
ndb/test/run-test/run-test.hpp
View file @
c1f04bf0
...
...
@@ -69,6 +69,7 @@ struct atrt_config {
struct
atrt_testcase
{
bool
m_report
;
bool
m_run_all
;
time_t
m_max_time
;
BaseString
m_command
;
BaseString
m_args
;
...
...
ndb/test/src/CpcClient.cpp
View file @
c1f04bf0
...
...
@@ -282,6 +282,7 @@ convert(const Properties & src, SimpleCpcClient::Process & dst){
b
&=
src
.
get
(
"stdout"
,
dst
.
m_stdout
);
b
&=
src
.
get
(
"stderr"
,
dst
.
m_stderr
);
b
&=
src
.
get
(
"ulimit"
,
dst
.
m_ulimit
);
b
&=
src
.
get
(
"shutdown"
,
dst
.
m_shutdown_options
);
return
b
;
}
...
...
@@ -305,6 +306,7 @@ convert(const SimpleCpcClient::Process & src, Properties & dst ){
b
&=
dst
.
put
(
"stdout"
,
src
.
m_stdout
.
c_str
());
b
&=
dst
.
put
(
"stderr"
,
src
.
m_stderr
.
c_str
());
b
&=
dst
.
put
(
"ulimit"
,
src
.
m_ulimit
.
c_str
());
b
&=
dst
.
put
(
"shutdown"
,
src
.
m_shutdown_options
.
c_str
());
return
b
;
}
...
...
@@ -372,6 +374,7 @@ SimpleCpcClient::list_processes(Vector<Process> &procs, Properties& reply) {
CPC_ARG
(
"stdout"
,
String
,
Mandatory
,
"Redirect stdout"
),
CPC_ARG
(
"stderr"
,
String
,
Mandatory
,
"Redirect stderr"
),
CPC_ARG
(
"ulimit"
,
String
,
Mandatory
,
"ulimit"
),
CPC_ARG
(
"shutdown"
,
String
,
Mandatory
,
"shutdown"
),
CPC_END
()
};
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment