Commit 798e7878 authored by joreland@mysql.com

Merge joreland@bk-internal.mysql.com:/home/bk/mysql-5.0

into mysql.com:/home/jonas/src/mysql-5.0
parents 3294a34e 48d6d545
@@ -210,7 +210,7 @@ drop table if exists t1;
SET SESSION storage_engine="heap";
SELECT @@storage_engine;
@@storage_engine
-HEAP
+MEMORY
CREATE TABLE t1 (a int not null);
show create table t1;
Table Create Table
@@ -222,7 +222,7 @@ SET SESSION storage_engine="gemini";
ERROR 42000: Unknown table engine 'gemini'
SELECT @@storage_engine;
@@storage_engine
-HEAP
+MEMORY
CREATE TABLE t1 (a int not null);
show create table t1;
Table Create Table
@@ -371,7 +371,7 @@ drop database mysqltest;
SET SESSION storage_engine="heap";
SELECT @@storage_engine;
@@storage_engine
-HEAP
+MEMORY
CREATE TABLE t1 (a int not null);
show create table t1;
Table Create Table
@@ -383,7 +383,7 @@ SET SESSION storage_engine="gemini";
ERROR 42000: Unknown table engine 'gemini'
SELECT @@storage_engine;
@@storage_engine
-HEAP
+MEMORY
CREATE TABLE t1 (a int not null);
show create table t1;
Table Create Table
......
drop table if exists t1;
drop database if exists mysqltest;
drop table if exists t1;
drop database if exists mysqltest;
create database mysqltest;
create database mysqltest;
create table mysqltest.t1 (a int primary key, b int) engine=ndb;
use mysqltest;
show tables;
Tables_in_mysqltest
t1
drop database mysqltest;
use mysqltest;
show tables;
Tables_in_mysqltest
create database mysqltest;
create table mysqltest.t1 (c int, d int primary key) engine=ndb;
use mysqltest;
show tables;
Tables_in_mysqltest
t1
drop database mysqltest;
use mysqltest;
show tables;
Tables_in_mysqltest
drop table if exists t1;
drop database if exists mysqltest;
@@ -322,8 +322,8 @@ prepare stmt4 from ' show storage engines ';
execute stmt4;
Engine Support Comment
MyISAM YES/NO Default engine as of MySQL 3.23 with great performance
-HEAP YES/NO Alias for MEMORY
MEMORY YES/NO Hash based, stored in memory, useful for temporary tables
+HEAP YES/NO Alias for MEMORY
MERGE YES/NO Collection of identical MyISAM tables
MRG_MYISAM YES/NO Alias for MERGE
ISAM YES/NO Obsolete storage engine, now replaced by MyISAM
......
...@@ -667,6 +667,8 @@ delete from t1| ...@@ -667,6 +667,8 @@ delete from t1|
drop table if exists t3| drop table if exists t3|
create table t3 ( s char(16), d int)| create table t3 ( s char(16), d int)|
call into_test4()| call into_test4()|
Warnings:
Warning 1329 No data to FETCH
select * from t3| select * from t3|
s d s d
into4 NULL into4 NULL
...@@ -1792,7 +1794,12 @@ end if; ...@@ -1792,7 +1794,12 @@ end if;
insert into t4 values (2, rc, t3); insert into t4 values (2, rc, t3);
end| end|
call bug1863(10)| call bug1863(10)|
Warnings:
Note 1051 Unknown table 'temp_t1'
Warning 1329 No data to FETCH
call bug1863(10)| call bug1863(10)|
Warnings:
Warning 1329 No data to FETCH
select * from t4| select * from t4|
f1 rc t3 f1 rc t3
2 0 NULL 2 0 NULL
...@@ -2090,7 +2097,11 @@ begin ...@@ -2090,7 +2097,11 @@ begin
end| end|
call bug4579_1()| call bug4579_1()|
call bug4579_1()| call bug4579_1()|
Warnings:
Warning 1329 No data to FETCH
call bug4579_1()| call bug4579_1()|
Warnings:
Warning 1329 No data to FETCH
drop procedure bug4579_1| drop procedure bug4579_1|
drop procedure bug4579_2| drop procedure bug4579_2|
drop table t3| drop table t3|
...@@ -3010,4 +3021,24 @@ select @x| ...@@ -3010,4 +3021,24 @@ select @x|
@x @x
2005 2005
drop function bug8861| drop function bug8861|
drop procedure if exists bug9004_1|
drop procedure if exists bug9004_2|
create procedure bug9004_1(x char(16))
begin
insert into t1 values (x, 42);
insert into t1 values (x, 17);
end|
create procedure bug9004_2(x char(16))
call bug9004_1(x)|
call bug9004_1('12345678901234567')|
Warnings:
Warning 1265 Data truncated for column 'id' at row 1
Warning 1265 Data truncated for column 'id' at row 2
call bug9004_2('12345678901234567890')|
Warnings:
Warning 1265 Data truncated for column 'id' at row 1
Warning 1265 Data truncated for column 'id' at row 2
delete from t1|
drop procedure bug9004_1|
drop procedure bug9004_2|
drop table t1,t2; drop table t1,t2;
@@ -148,7 +148,7 @@ timed_mutexes OFF
set storage_engine=MYISAM, storage_engine="HEAP", global storage_engine="MERGE";
show local variables like 'storage_engine';
Variable_name Value
-storage_engine HEAP
+storage_engine MEMORY
show global variables like 'storage_engine';
Variable_name Value
storage_engine MERGE
@@ -254,7 +254,7 @@ set storage_engine=MERGE, big_tables=2;
ERROR 42000: Variable 'big_tables' can't be set to the value of '2'
show local variables like 'storage_engine';
Variable_name Value
-storage_engine HEAP
+storage_engine MEMORY
set SESSION query_cache_size=10000;
ERROR HY000: Variable 'query_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
set GLOBAL storage_engine=DEFAULT;
......
-- source include/have_ndb.inc
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc
--disable_warnings
connection server1;
drop table if exists t1;
drop database if exists mysqltest;
connection server2;
drop table if exists t1;
drop database if exists mysqltest;
--enable_warnings
#
# Check that all tables in a database are dropped when database is dropped
#
connection server1;
create database mysqltest;
connection server2;
create database mysqltest;
create table mysqltest.t1 (a int primary key, b int) engine=ndb;
use mysqltest;
show tables;
connection server1;
drop database mysqltest;
connection server2;
use mysqltest;
show tables;
connection server1;
create database mysqltest;
create table mysqltest.t1 (c int, d int primary key) engine=ndb;
use mysqltest;
show tables;
connection server2;
drop database mysqltest;
connection server1;
use mysqltest;
show tables;
--disable_warnings
drop table if exists t1;
drop database if exists mysqltest;
--enable_warnings
...@@ -3700,6 +3700,30 @@ select @x| ...@@ -3700,6 +3700,30 @@ select @x|
drop function bug8861| drop function bug8861|
#
# BUG#9004: Inconsistent behaviour of SP re. warnings
#
--disable_warnings
drop procedure if exists bug9004_1|
drop procedure if exists bug9004_2|
--enable_warnings
create procedure bug9004_1(x char(16))
begin
insert into t1 values (x, 42);
insert into t1 values (x, 17);
end|
create procedure bug9004_2(x char(16))
call bug9004_1(x)|
# Truncation warnings expected...
call bug9004_1('12345678901234567')|
call bug9004_2('12345678901234567890')|
delete from t1|
drop procedure bug9004_1|
drop procedure bug9004_2|
# #
# BUG#NNNN: New bug synopsis # BUG#NNNN: New bug synopsis
# #
......
@@ -10,7 +10,7 @@ Next DBTC 8035
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
-Next DBTUX 12007
+Next DBTUX 12008
Next SUMA 13001
TESTING NODE FAILURE, ARBITRATION
...@@ -443,6 +443,7 @@ Test routing of signals: ...@@ -443,6 +443,7 @@ Test routing of signals:
Ordered index: Ordered index:
-------------- --------------
12007: Make next alloc node fail with no memory error
Dbdict: Dbdict:
------- -------
......
...@@ -1779,6 +1779,10 @@ private: ...@@ -1779,6 +1779,10 @@ private:
Operationrec* const regOperPtr, Operationrec* const regOperPtr,
Tablerec* const regTabPtr); Tablerec* const regTabPtr);
int addTuxEntries(Signal* signal,
Operationrec* regOperPtr,
Tablerec* regTabPtr);
// these crash the node on error // these crash the node on error
void executeTuxCommitTriggers(Signal* signal, void executeTuxCommitTriggers(Signal* signal,
...@@ -1789,6 +1793,10 @@ private: ...@@ -1789,6 +1793,10 @@ private:
Operationrec* regOperPtr, Operationrec* regOperPtr,
Tablerec* const regTabPtr); Tablerec* const regTabPtr);
void removeTuxEntries(Signal* signal,
Operationrec* regOperPtr,
Tablerec* regTabPtr);
// ***************************************************************** // *****************************************************************
// Error Handling routines. // Error Handling routines.
// ***************************************************************** // *****************************************************************
......
@@ -973,25 +973,7 @@ Dbtup::executeTuxInsertTriggers(Signal* signal,
req->pageOffset = regOperPtr->pageOffset;
req->tupVersion = tupVersion;
req->opInfo = TuxMaintReq::OpAdd;
-// loop over index list
-const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
-TriggerPtr triggerPtr;
-triggerList.first(triggerPtr);
-while (triggerPtr.i != RNIL) {
-ljam();
-req->indexId = triggerPtr.p->indexId;
-req->errorCode = RNIL;
-EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
-signal, TuxMaintReq::SignalLength);
-ljamEntry();
-if (req->errorCode != 0) {
-ljam();
-terrorCode = req->errorCode;
-return -1;
-}
-triggerList.next(triggerPtr);
-}
-return 0;
+return addTuxEntries(signal, regOperPtr, regTabPtr);
}

int
@@ -1012,9 +994,18 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal,
req->pageOffset = regOperPtr->pageOffset;
req->tupVersion = tupVersion;
req->opInfo = TuxMaintReq::OpAdd;
-// loop over index list
+return addTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+int
+Dbtup::addTuxEntries(Signal* signal,
+Operationrec* regOperPtr,
+Tablerec* regTabPtr)
+{
+TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
TriggerPtr triggerPtr;
+Uint32 failPtrI;
triggerList.first(triggerPtr);
while (triggerPtr.i != RNIL) {
ljam();
@@ -1026,11 +1017,29 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal,
if (req->errorCode != 0) {
ljam();
terrorCode = req->errorCode;
-return -1;
+failPtrI = triggerPtr.i;
+goto fail;
}
triggerList.next(triggerPtr);
}
return 0;
+fail:
+req->opInfo = TuxMaintReq::OpRemove;
+triggerList.first(triggerPtr);
+while (triggerPtr.i != failPtrI) {
+ljam();
+req->indexId = triggerPtr.p->indexId;
+req->errorCode = RNIL;
+EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+signal, TuxMaintReq::SignalLength);
+ljamEntry();
+ndbrequire(req->errorCode == 0);
+triggerList.next(triggerPtr);
+}
+#ifdef VM_TRACE
+ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl;
+#endif
+return -1;
}

int
@@ -1049,7 +1058,6 @@ Dbtup::executeTuxCommitTriggers(Signal* signal,
{
TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
// get version
-// XXX could add prevTupVersion to Operationrec
Uint32 tupVersion;
if (regOperPtr->optype == ZINSERT) {
if (! regOperPtr->deleteInsertFlag)
@@ -1087,21 +1095,7 @@ Dbtup::executeTuxCommitTriggers(Signal* signal,
req->pageOffset = regOperPtr->pageOffset;
req->tupVersion = tupVersion;
req->opInfo = TuxMaintReq::OpRemove;
-// loop over index list
-const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
-TriggerPtr triggerPtr;
-triggerList.first(triggerPtr);
-while (triggerPtr.i != RNIL) {
-ljam();
-req->indexId = triggerPtr.p->indexId;
-req->errorCode = RNIL;
-EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
-signal, TuxMaintReq::SignalLength);
-ljamEntry();
-// commit must succeed
-ndbrequire(req->errorCode == 0);
-triggerList.next(triggerPtr);
-}
+removeTuxEntries(signal, regOperPtr, regTabPtr);
}

void
@@ -1132,7 +1126,15 @@ Dbtup::executeTuxAbortTriggers(Signal* signal,
req->pageOffset = regOperPtr->pageOffset;
req->tupVersion = tupVersion;
req->opInfo = TuxMaintReq::OpRemove;
-// loop over index list
+removeTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+void
+Dbtup::removeTuxEntries(Signal* signal,
+Operationrec* regOperPtr,
+Tablerec* regTabPtr)
+{
+TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
TriggerPtr triggerPtr;
triggerList.first(triggerPtr);
@@ -1143,7 +1145,7 @@ Dbtup::executeTuxAbortTriggers(Signal* signal,
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
ljamEntry();
-// abort must succeed
+// must succeed
ndbrequire(req->errorCode == 0);
triggerList.next(triggerPtr);
}
......
@@ -135,6 +135,24 @@ abort DELETE none -
1) alternatively, store prevTupVersion in operation record.

+Abort from ordered index error
+------------------------------
+
+Obviously, index update failure causes operation failure.
+The operation is then aborted later by TC.
+
+The problem here is with multiple indexes. Some may have been
+updated successfully before the one that failed. Therefore
+the trigger code aborts the successful ones already in
+the prepare phase.
+
+In other words, multiple indexes are treated as one.
+
+Abort from any cause
+--------------------
+
+[ hairy stuff ]

Read attributes, query status
-----------------------------
@@ -170,14 +188,11 @@ used to decide if the scan can see the tuple.
This signal may also be called during any phase since commit/abort
of all operations is not done in one time-slice.

-Commit and abort
-----------------
-
-[ hairy stuff ]

Problems
--------

Current abort code can destroy a tuple version too early. This
happens in test case "ticuur" (insert-commit-update-update-rollback),
if abort of first update arrives before abort of second update.

+vim: set textwidth=68:
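
A note on the "multiple indexes are treated as one" rule above: it is what the new Dbtup::addTuxEntries() in this commit implements. The following is only a rough, hypothetical sketch of the pattern (Index, addEntry and removeEntry are invented stand-ins, not DBTUP code): add the entry to each index in turn and, on the first failure, walk the list again from the start and remove the entries that were already added, so the whole operation fails cleanly.

#include <cstdio>
#include <vector>

// Hypothetical stand-ins; the real code sends a TUX_MAINT_REQ to DBTUX for
// each entry in the table's tuxCustomTriggers list.
struct Index { int id; bool failOnAdd; };

static bool addEntry(const Index& ix)    { return !ix.failOnAdd; } // pretend insert into one index
static void removeEntry(const Index& ix) { std::printf("undo index %d\n", ix.id); }

// Add the tuple to every index, or to none: on the first failure, remove the
// entries already added (prepare-phase rollback), then report the error.
static int addAllOrNone(const std::vector<Index>& indexes)
{
  size_t failed = indexes.size();
  for (size_t i = 0; i < indexes.size(); i++) {
    if (!addEntry(indexes[i])) { failed = i; break; }
  }
  if (failed == indexes.size())
    return 0;                       // all indexes updated
  for (size_t i = 0; i < failed; i++)
    removeEntry(indexes[i]);        // undo the successful ones
  return -1;                        // the operation fails as a whole
}

int main()
{
  std::vector<Index> indexes = { {1, false}, {2, false}, {3, true} };
  std::printf("result: %d\n", addAllOrNone(indexes));
  return 0;
}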
...@@ -23,6 +23,11 @@ ...@@ -23,6 +23,11 @@
int int
Dbtux::allocNode(Signal* signal, NodeHandle& node) Dbtux::allocNode(Signal* signal, NodeHandle& node)
{ {
if (ERROR_INSERTED(12007)) {
jam();
CLEAR_ERROR_INSERT_VALUE;
return TuxMaintReq::NoMemError;
}
Frag& frag = node.m_frag; Frag& frag = node.m_frag;
Uint32 pageId = NullTupLoc.getPageId(); Uint32 pageId = NullTupLoc.getPageId();
Uint32 pageOffset = NullTupLoc.getPageOffset(); Uint32 pageOffset = NullTupLoc.getPageOffset();
...@@ -34,6 +39,12 @@ Dbtux::allocNode(Signal* signal, NodeHandle& node) ...@@ -34,6 +39,12 @@ Dbtux::allocNode(Signal* signal, NodeHandle& node)
node.m_loc = TupLoc(pageId, pageOffset); node.m_loc = TupLoc(pageId, pageOffset);
node.m_node = reinterpret_cast<TreeNode*>(node32); node.m_node = reinterpret_cast<TreeNode*>(node32);
ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0); ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
} else {
switch (errorCode) {
case 827:
errorCode = TuxMaintReq::NoMemError;
break;
}
} }
return errorCode; return errorCode;
} }
......
@@ -179,11 +179,11 @@ ErrorBundle ErrorCodes[] = {
*/
{ 623, IS, "623" },
{ 624, IS, "624" },
-{ 625, IS, "Out of memory in Ndb Kernel, index part (increase IndexMemory)" },
+{ 625, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
{ 640, IS, "Too many hash indexes (should not happen)" },
{ 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
-{ 827, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" },
-{ 902, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" },
+{ 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
+{ 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
{ 903, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
{ 904, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
{ 905, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
......
...@@ -164,6 +164,16 @@ irandom(unsigned n) ...@@ -164,6 +164,16 @@ irandom(unsigned n)
return i; return i;
} }
static bool
randompct(unsigned pct)
{
if (pct == 0)
return false;
if (pct >= 100)
return true;
return urandom(100) < pct;
}
// log and error macros // log and error macros
static NdbMutex *ndbout_mutex = NULL; static NdbMutex *ndbout_mutex = NULL;
...@@ -259,6 +269,8 @@ struct Par : public Opt { ...@@ -259,6 +269,8 @@ struct Par : public Opt {
bool m_verify; bool m_verify;
// deadlock possible // deadlock possible
bool m_deadlock; bool m_deadlock;
// abort percentabge
unsigned m_abortpct;
NdbOperation::LockMode m_lockmode; NdbOperation::LockMode m_lockmode;
// ordered range scan // ordered range scan
bool m_ordered; bool m_ordered;
...@@ -281,6 +293,7 @@ struct Par : public Opt { ...@@ -281,6 +293,7 @@ struct Par : public Opt {
m_randomkey(false), m_randomkey(false),
m_verify(false), m_verify(false),
m_deadlock(false), m_deadlock(false),
m_abortpct(0),
m_lockmode(NdbOperation::LM_Read), m_lockmode(NdbOperation::LM_Read),
m_ordered(false), m_ordered(false),
m_descending(false) { m_descending(false) {
@@ -1143,7 +1156,7 @@ struct Con {
NdbScanFilter* m_scanfilter;
enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive };
ScanMode m_scanmode;
-enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther };
+enum ErrType { ErrNone = 0, ErrDeadlock, ErrNospace, ErrOther };
ErrType m_errtype;
Con() :
m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_indexop(0),
@@ -1172,7 +1185,7 @@
int endFilter();
int setFilter(int num, int cond, const void* value, unsigned len);
int execute(ExecType t);
-int execute(ExecType t, bool& deadlock);
+int execute(ExecType t, bool& deadlock, bool& nospace);
int readTuples(Par par);
int readIndexTuples(Par par);
int executeScan();
@@ -1354,17 +1367,21 @@ Con::execute(ExecType t)
}

int
-Con::execute(ExecType t, bool& deadlock)
+Con::execute(ExecType t, bool& deadlock, bool& nospace)
{
int ret = execute(t);
-if (ret != 0) {
-if (deadlock && m_errtype == ErrDeadlock) {
-LL3("caught deadlock");
-ret = 0;
-}
+if (ret != 0 && deadlock && m_errtype == ErrDeadlock) {
+LL3("caught deadlock");
+ret = 0;
} else {
deadlock = false;
}
+if (ret != 0 && nospace && m_errtype == ErrNospace) {
+LL3("caught nospace");
+ret = 0;
+} else {
+nospace = false;
+}
CHK(ret == 0);
return 0;
}
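
A note on the three-argument Con::execute(t, deadlock, nospace) above: both flags are in/out parameters. On input, true means "this error class is tolerated"; on output, true means such an error actually occurred and was swallowed, otherwise the flag is reset to false. Below is a minimal, self-contained sketch of that convention only; executeOnce and this ErrType are illustrative stand-ins, not the test-program API.

#include <cassert>

enum ErrType { ErrNone, ErrDeadlock, ErrNospace };

// Mirrors the in/out flag convention: tolerated errors turn a failure into
// success and leave the corresponding flag set for the caller to inspect.
static int executeOnce(ErrType err, bool& deadlock, bool& nospace)
{
  int ret = (err == ErrNone) ? 0 : -1;
  if (ret != 0 && deadlock && err == ErrDeadlock)
    ret = 0;                // tolerated: deadlock stays true for the caller
  else
    deadlock = false;
  if (ret != 0 && nospace && err == ErrNospace)
    ret = 0;                // tolerated: nospace stays true for the caller
  else
    nospace = false;
  return ret;
}

int main()
{
  bool deadlock = false, nospace = true;     // only "no space" is tolerated here
  assert(executeOnce(ErrNospace, deadlock, nospace) == 0 && nospace);
  deadlock = false; nospace = true;
  assert(executeOnce(ErrDeadlock, deadlock, nospace) != 0 && !nospace);
  return 0;
}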
...@@ -1475,6 +1492,8 @@ Con::printerror(NdbOut& out) ...@@ -1475,6 +1492,8 @@ Con::printerror(NdbOut& out)
// 631 is new, occurs only on 4 db nodes, needs to be checked out // 631 is new, occurs only on 4 db nodes, needs to be checked out
if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499 || code == 631) if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499 || code == 631)
m_errtype = ErrDeadlock; m_errtype = ErrDeadlock;
if (code == 826 || code == 827 || code == 902)
m_errtype = ErrNospace;
} }
if (m_op && m_op->getNdbError().code != 0) { if (m_op && m_op->getNdbError().code != 0) {
LL0(++any << " op : error " << m_op->getNdbError()); LL0(++any << " op : error " << m_op->getNdbError());
@@ -2480,8 +2499,8 @@ struct Set {
void dbsave(unsigned i);
void calc(Par par, unsigned i, unsigned mask = 0);
bool pending(unsigned i, unsigned mask) const;
-void notpending(unsigned i);
-void notpending(const Lst& lst);
+void notpending(unsigned i, ExecType et = Commit);
+void notpending(const Lst& lst, ExecType et = Commit);
void dbdiscard(unsigned i);
void dbdiscard(const Lst& lst);
const Row& dbrow(unsigned i) const;
@@ -2620,26 +2639,30 @@ Set::pending(unsigned i, unsigned mask) const
}

void
-Set::notpending(unsigned i)
+Set::notpending(unsigned i, ExecType et)
{
assert(m_row[i] != 0);
Row& row = *m_row[i];
-if (row.m_pending == Row::InsOp) {
-row.m_exist = true;
-} else if (row.m_pending == Row::UpdOp) {
-;
-} else if (row.m_pending == Row::DelOp) {
-row.m_exist = false;
+if (et == Commit) {
+if (row.m_pending == Row::InsOp)
+row.m_exist = true;
+if (row.m_pending == Row::DelOp)
+row.m_exist = false;
+} else {
+if (row.m_pending == Row::InsOp)
+row.m_exist = false;
+if (row.m_pending == Row::DelOp)
+row.m_exist = true;
}
row.m_pending = Row::NoOp;
}

void
-Set::notpending(const Lst& lst)
+Set::notpending(const Lst& lst, ExecType et)
{
for (unsigned j = 0; j < lst.m_cnt; j++) {
unsigned i = lst.m_arr[j];
-notpending(i);
+notpending(i, et);
}
}
@@ -2831,8 +2854,6 @@ Set::putval(unsigned i, bool force, unsigned n)
return 0;
}

-// verify
int
Set::verify(Par par, const Set& set2) const
{
@@ -3213,14 +3234,20 @@ pkinsert(Par par)
lst.push(i);
if (lst.cnt() == par.m_batch) {
bool deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+bool nospace = true;
+ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+CHK(con.execute(et, deadlock, nospace) == 0);
con.closeTransaction();
if (deadlock) {
LL1("pkinsert: stop on deadlock [at 1]");
return 0;
}
+if (nospace) {
+LL1("pkinsert: cnt=" << j << " stop on nospace");
+return 0;
+}
set.lock();
-set.notpending(lst);
+set.notpending(lst, et);
set.unlock();
lst.reset();
CHK(con.startTransaction() == 0);
@@ -3228,14 +3255,20 @@ }
}
if (lst.cnt() != 0) {
bool deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+bool nospace = true;
+ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+CHK(con.execute(et, deadlock, nospace) == 0);
con.closeTransaction();
if (deadlock) {
LL1("pkinsert: stop on deadlock [at 2]");
return 0;
}
+if (nospace) {
+LL1("pkinsert: end: stop on nospace");
+return 0;
+}
set.lock();
-set.notpending(lst);
+set.notpending(lst, et);
set.unlock();
return 0;
}
@@ -3253,6 +3286,7 @@ pkupdate(Par par)
CHK(con.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+bool nospace = false;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
unsigned i = thrrow(par, j2);
@@ -3269,28 +3303,38 @@
lst.push(i);
if (lst.cnt() == par.m_batch) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+nospace = true;
+ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
LL1("pkupdate: stop on deadlock [at 1]");
break;
}
+if (nospace) {
+LL1("pkupdate: cnt=" << j << " stop on nospace [at 1]");
+break;
+}
con.closeTransaction();
set.lock();
-set.notpending(lst);
+set.notpending(lst, et);
set.dbdiscard(lst);
set.unlock();
lst.reset();
CHK(con.startTransaction() == 0);
}
}
-if (! deadlock && lst.cnt() != 0) {
+if (! deadlock && ! nospace && lst.cnt() != 0) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+nospace = true;
+ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
-LL1("pkupdate: stop on deadlock [at 1]");
+LL1("pkupdate: stop on deadlock [at 2]");
+} else if (nospace) {
+LL1("pkupdate: end: stop on nospace [at 2]");
} else {
set.lock();
-set.notpending(lst);
+set.notpending(lst, et);
set.dbdiscard(lst);
set.unlock();
}
@@ -3309,6 +3353,7 @@ pkdelete(Par par)
CHK(con.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+bool nospace = false;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
unsigned i = thrrow(par, j2);
@@ -3323,27 +3368,31 @@
lst.push(i);
if (lst.cnt() == par.m_batch) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+nospace = true;
+ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
LL1("pkdelete: stop on deadlock [at 1]");
break;
}
con.closeTransaction();
set.lock();
-set.notpending(lst);
+set.notpending(lst, et);
set.unlock();
lst.reset();
CHK(con.startTransaction() == 0);
}
}
-if (! deadlock && lst.cnt() != 0) {
+if (! deadlock && ! nospace && lst.cnt() != 0) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+nospace = true;
+ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
LL1("pkdelete: stop on deadlock [at 2]");
} else {
set.lock();
-set.notpending(lst);
+set.notpending(lst, et);
set.unlock();
}
}
@@ -3418,6 +3467,7 @@ hashindexupdate(Par par, const ITab& itab)
CHK(con.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+bool nospace = false;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
unsigned i = thrrow(par, j2);
@@ -3435,7 +3485,7 @@ hashindexupdate(Par par, const ITab& itab)
lst.push(i);
if (lst.cnt() == par.m_batch) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+CHK(con.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
LL1("hashindexupdate: stop on deadlock [at 1]");
break;
@@ -3451,9 +3501,9 @@ }
}
if (! deadlock && lst.cnt() != 0) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+CHK(con.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
-LL1("hashindexupdate: stop on deadlock [at 1]");
+LL1("hashindexupdate: stop on deadlock [at 2]");
} else {
set.lock();
set.notpending(lst);
@@ -3474,6 +3524,7 @@ hashindexdelete(Par par, const ITab& itab)
CHK(con.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+bool nospace = false;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
unsigned i = thrrow(par, j2);
@@ -3488,7 +3539,7 @@ hashindexdelete(Par par, const ITab& itab)
lst.push(i);
if (lst.cnt() == par.m_batch) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+CHK(con.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
LL1("hashindexdelete: stop on deadlock [at 1]");
break;
@@ -3503,7 +3554,7 @@ }
}
if (! deadlock && lst.cnt() != 0) {
deadlock = par.m_deadlock;
-CHK(con.execute(Commit, deadlock) == 0);
+CHK(con.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
LL1("hashindexdelete: stop on deadlock [at 2]");
} else {
@@ -3875,6 +3926,7 @@ scanupdatetable(Par par)
CHK(con2.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+bool nospace = false;
while (1) {
int ret;
deadlock = par.m_deadlock;
@@ -3910,7 +3962,7 @@ scanupdatetable(Par par)
set.unlock();
if (lst.cnt() == par.m_batch) {
deadlock = par.m_deadlock;
-CHK(con2.execute(Commit, deadlock) == 0);
+CHK(con2.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
LL1("scanupdatetable: stop on deadlock [at 2]");
goto out;
@@ -3927,7 +3979,7 @@ scanupdatetable(Par par)
CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
if (ret == 2 && lst.cnt() != 0) {
deadlock = par.m_deadlock;
-CHK(con2.execute(Commit, deadlock) == 0);
+CHK(con2.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
LL1("scanupdatetable: stop on deadlock [at 3]");
goto out;
@@ -3974,6 +4026,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
CHK(con2.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+bool nospace = false;
while (1) {
int ret;
deadlock = par.m_deadlock;
@@ -4009,7 +4062,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
set.unlock();
if (lst.cnt() == par.m_batch) {
deadlock = par.m_deadlock;
-CHK(con2.execute(Commit, deadlock) == 0);
+CHK(con2.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
LL1("scanupdateindex: stop on deadlock [at 2]");
goto out;
@@ -4026,7 +4079,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
if (ret == 2 && lst.cnt() != 0) {
deadlock = par.m_deadlock;
-CHK(con2.execute(Commit, deadlock) == 0);
+CHK(con2.execute(Commit, deadlock, nospace) == 0);
if (deadlock) {
LL1("scanupdateindex: stop on deadlock [at 3]");
goto out;
...@@ -4094,6 +4147,10 @@ readverify(Par par) ...@@ -4094,6 +4147,10 @@ readverify(Par par)
if (par.m_noverify) if (par.m_noverify)
return 0; return 0;
par.m_verify = true; par.m_verify = true;
if (par.m_abortpct != 0) {
LL2("skip verify in this version"); // implement in 5.0 version
par.m_verify = false;
}
par.m_lockmode = NdbOperation::LM_CommittedRead; par.m_lockmode = NdbOperation::LM_CommittedRead;
CHK(pkread(par) == 0); CHK(pkread(par) == 0);
CHK(scanreadall(par) == 0); CHK(scanreadall(par) == 0);
...@@ -4106,6 +4163,10 @@ readverifyfull(Par par) ...@@ -4106,6 +4163,10 @@ readverifyfull(Par par)
if (par.m_noverify) if (par.m_noverify)
return 0; return 0;
par.m_verify = true; par.m_verify = true;
if (par.m_abortpct != 0) {
LL2("skip verify in this version"); // implement in 5.0 version
par.m_verify = false;
}
par.m_lockmode = NdbOperation::LM_CommittedRead; par.m_lockmode = NdbOperation::LM_CommittedRead;
const Tab& tab = par.tab(); const Tab& tab = par.tab();
if (par.m_no == 0) { if (par.m_no == 0) {
@@ -4457,11 +4518,11 @@ runstep(Par par, const char* fname, TFunc func, unsigned mode)
for (n = 0; n < threads; n++) {
LL4("start " << n);
Thr& thr = *g_thrlist[n];
-thr.m_par.m_tab = par.m_tab;
-thr.m_par.m_set = par.m_set;
-thr.m_par.m_tmr = par.m_tmr;
-thr.m_par.m_lno = par.m_lno;
-thr.m_par.m_slno = par.m_slno;
+Par oldpar = thr.m_par;
+// update parameters
+thr.m_par = par;
+thr.m_par.m_no = oldpar.m_no;
+thr.m_par.m_con = oldpar.m_con;
thr.m_func = func;
thr.start();
}
...@@ -4590,6 +4651,24 @@ tbusybuild(Par par) ...@@ -4590,6 +4651,24 @@ tbusybuild(Par par)
return 0; return 0;
} }
static int
trollback(Par par)
{
par.m_abortpct = 50;
RUNSTEP(par, droptable, ST);
RUNSTEP(par, createtable, ST);
RUNSTEP(par, invalidatetable, MT);
RUNSTEP(par, pkinsert, MT);
RUNSTEP(par, createindex, ST);
RUNSTEP(par, invalidateindex, MT);
RUNSTEP(par, readverify, ST);
for (par.m_slno = 0; par.m_slno < par.m_subloop; par.m_slno++) {
RUNSTEP(par, mixedoperations, MT);
RUNSTEP(par, readverify, ST);
}
return 0;
}
static int static int
ttimebuild(Par par) ttimebuild(Par par)
{ {
...@@ -4712,6 +4791,7 @@ tcaselist[] = { ...@@ -4712,6 +4791,7 @@ tcaselist[] = {
TCase("d", tpkopsread, "pk operations and scan reads"), TCase("d", tpkopsread, "pk operations and scan reads"),
TCase("e", tmixedops, "pk operations and scan operations"), TCase("e", tmixedops, "pk operations and scan operations"),
TCase("f", tbusybuild, "pk operations and index build"), TCase("f", tbusybuild, "pk operations and index build"),
TCase("g", trollback, "operations with random rollbacks"),
TCase("t", ttimebuild, "time index build"), TCase("t", ttimebuild, "time index build"),
TCase("u", ttimemaint, "time index maintenance"), TCase("u", ttimemaint, "time index maintenance"),
TCase("v", ttimescan, "time full scan table vs index on pk"), TCase("v", ttimescan, "time full scan table vs index on pk"),
......
@@ -4111,18 +4111,6 @@ int ha_ndbcluster::drop_table()
}

-/*
-Drop a database in NDB Cluster
-*/
-int ndbcluster_drop_database(const char *path)
-{
-DBUG_ENTER("ndbcluster_drop_database");
-// TODO drop all tables for this database
-DBUG_RETURN(1);
-}

ulonglong ha_ndbcluster::get_auto_increment()
{
int cache_size;
...@@ -4477,6 +4465,53 @@ extern "C" byte* tables_get_key(const char *entry, uint *length, ...@@ -4477,6 +4465,53 @@ extern "C" byte* tables_get_key(const char *entry, uint *length,
} }
/*
Drop a database in NDB Cluster
*/
int ndbcluster_drop_database(const char *path)
{
DBUG_ENTER("ndbcluster_drop_database");
THD *thd= current_thd;
char dbname[FN_HEADLEN];
Ndb* ndb;
NdbDictionary::Dictionary::List list;
uint i;
char *tabname;
List<char> drop_list;
ha_ndbcluster::set_dbname(path, (char *)&dbname);
DBUG_PRINT("enter", ("db: %s", dbname));
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(HA_ERR_NO_CONNECTION);
// List tables in NDB
NDBDICT *dict= ndb->getDictionary();
if (dict->listObjects(list,
NdbDictionary::Object::UserTable) != 0)
ERR_RETURN(dict->getNdbError());
for (i= 0 ; i < list.count ; i++)
{
NdbDictionary::Dictionary::List::Element& t= list.elements[i];
DBUG_PRINT("info", ("Found %s/%s in NDB", t.database, t.name));
// Add only tables that belongs to db
if (my_strcasecmp(system_charset_info, t.database, dbname))
continue;
DBUG_PRINT("info", ("%s must be dropped", t.name));
drop_list.push_back(thd->strdup(t.name));
}
// Drop any tables belonging to database
ndb->setDatabaseName(dbname);
List_iterator_fast<char> it(drop_list);
while ((tabname=it++))
if (dict->dropTable(tabname))
ERR_RETURN(dict->getNdbError());
DBUG_RETURN(0);
}
int ndbcluster_find_files(THD *thd,const char *db,const char *path, int ndbcluster_find_files(THD *thd,const char *db,const char *path,
const char *wild, bool dir, List<char> *files) const char *wild, bool dir, List<char> *files)
{ {
@@ -4797,26 +4832,31 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op)
DBUG_VOID_RETURN;
}

-/*
-Set m_tabname from full pathname to table file
+/**
+* Set a given location from full pathname to database name
+*
*/
-void ha_ndbcluster::set_tabname(const char *path_name)
+void ha_ndbcluster::set_dbname(const char *path_name, char *dbname)
{
char *end, *ptr;
/* Scan name from the end */
-end= strend(path_name)-1;
-ptr= end;
+ptr= strend(path_name)-1;
+while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
+ptr--;
+}
+ptr--;
+end= ptr;
while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
ptr--;
}
uint name_len= end - ptr;
-memcpy(m_tabname, ptr + 1, end - ptr);
-m_tabname[name_len]= '\0';
+memcpy(dbname, ptr + 1, name_len);
+dbname[name_len]= '\0';
#ifdef __WIN__
/* Put to lower case */
-ptr= m_tabname;
+ptr= dbname;
while (*ptr != '\0') {
*ptr= tolower(*ptr);
@@ -4825,6 +4865,15 @@ void ha_ndbcluster::set_tabname(const char *path_name)
#endif
}

+/*
+Set m_dbname from full pathname to table file
+*/
+void ha_ndbcluster::set_dbname(const char *path_name)
+{
+set_dbname(path_name, m_dbname);
+}
+
/**
* Set a given location from full pathname to table file
*
@@ -4854,39 +4903,13 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname)
#endif
}

/*
-Set m_dbname from full pathname to table file
+Set m_tabname from full pathname to table file
*/
-void ha_ndbcluster::set_dbname(const char *path_name)
+void ha_ndbcluster::set_tabname(const char *path_name)
{
-char *end, *ptr;
-/* Scan name from the end */
-ptr= strend(path_name)-1;
-while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
-ptr--;
-}
-ptr--;
-end= ptr;
-while (ptr >= path_name && *ptr != '\\' && *ptr != '/') {
-ptr--;
-}
-uint name_len= end - ptr;
-memcpy(m_dbname, ptr + 1, name_len);
-m_dbname[name_len]= '\0';
-#ifdef __WIN__
-/* Put to lower case */
-ptr= m_dbname;
-while (*ptr != '\0') {
-*ptr= tolower(*ptr);
-ptr++;
-}
-#endif
+set_tabname(path_name, m_tabname);
}
......
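
A note on the refactoring above: both helpers, ha_ndbcluster::set_dbname(path, dbname) and set_tabname(path, tabname), parse the handler's "./database/table" style path by scanning backwards over the last two components. The following is a standalone sketch of that parsing under the assumption of such a path layout; split_path is an invented helper for illustration, not part of the handler.

#include <cstdio>
#include <cstring>

// Split "./mysqltest/t1" into database "mysqltest" and table "t1" by scanning
// from the end: last component = table name, second-to-last = database name.
// Output buffers are assumed to hold at least 64 bytes in this sketch.
static void split_path(const char* path, char* dbname, char* tabname)
{
  const char* end = path + std::strlen(path) - 1;

  const char* ptr = end;                    // scan back over the table name
  while (ptr >= path && *ptr != '/' && *ptr != '\\')
    ptr--;
  std::snprintf(tabname, 64, "%.*s", (int)(end - ptr), ptr + 1);

  end = --ptr;                              // then back over the database name
  while (ptr >= path && *ptr != '/' && *ptr != '\\')
    ptr--;
  std::snprintf(dbname, 64, "%.*s", (int)(end - ptr), ptr + 1);
}

int main()
{
  char db[64], tab[64];
  split_path("./mysqltest/t1", db, tab);
  std::printf("db=%s table=%s\n", db, tab);  // prints: db=mysqltest table=t1
  return 0;
}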
@@ -457,6 +457,9 @@ class ha_ndbcluster: public handler
static Thd_ndb* seize_thd_ndb();
static void release_thd_ndb(Thd_ndb* thd_ndb);

+static void set_dbname(const char *pathname, char *dbname);
+static void set_tabname(const char *pathname, char *tabname);
+
/*
Condition pushdown
*/
@@ -537,7 +540,6 @@
void set_dbname(const char *pathname);
void set_tabname(const char *pathname);
-void set_tabname(const char *pathname, char *tabname);
bool set_hidden_key(NdbOperation*,
uint fieldnr, const byte* field_ptr);
......
@@ -70,10 +70,10 @@ struct show_table_type_st sys_table_types[]=
{
{"MyISAM", &have_yes,
"Default engine as of MySQL 3.23 with great performance", DB_TYPE_MYISAM},
-{"HEAP", &have_yes,
-"Alias for MEMORY", DB_TYPE_HEAP},
{"MEMORY", &have_yes,
"Hash based, stored in memory, useful for temporary tables", DB_TYPE_HEAP},
+{"HEAP", &have_yes,
+"Alias for MEMORY", DB_TYPE_HEAP},
{"MERGE", &have_yes,
"Collection of identical MyISAM tables", DB_TYPE_MRG_MYISAM},
{"MRG_MYISAM",&have_yes,
......
...@@ -2857,6 +2857,13 @@ int TC_LOG_BINLOG::open(const char *opt_name) ...@@ -2857,6 +2857,13 @@ int TC_LOG_BINLOG::open(const char *opt_name)
pthread_mutex_init(&LOCK_prep_xids, MY_MUTEX_INIT_FAST); pthread_mutex_init(&LOCK_prep_xids, MY_MUTEX_INIT_FAST);
pthread_cond_init (&COND_prep_xids, 0); pthread_cond_init (&COND_prep_xids, 0);
if (!my_b_inited(&index_file))
{
/* There was a failure to open the index file, can't open the binlog */
cleanup();
return 1;
}
if (using_heuristic_recover()) if (using_heuristic_recover())
{ {
/* generate a new binlog to mask a corrupted one */ /* generate a new binlog to mask a corrupted one */
......
@@ -847,7 +847,6 @@ struct show_var_st init_vars[]= {
{"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL},
#endif
{"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL},
-{"log_update", (char*) &opt_update_log, SHOW_BOOL},
{sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS},
{sys_long_query_time.name, (char*) &sys_long_query_time, SHOW_SYS},
{sys_low_priority_updates.name, (char*) &sys_low_priority_updates, SHOW_SYS},
......
@@ -113,7 +113,7 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
!(thd->options & OPTION_SQL_NOTES))
DBUG_RETURN(0);

-if (thd->query_id != thd->warn_id)
+if (thd->query_id != thd->warn_id && !thd->spcont)
mysql_reset_errors(thd, 0);
thd->got_warning= 1;
......
@@ -103,7 +103,8 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
return -1;
}
#endif
-*(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT;
+clear_timestamp_auto_bits(table->timestamp_field_type,
+TIMESTAMP_AUTO_SET_ON_INSERT);
}
else
{ // Part field list
@@ -150,7 +151,8 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
}
if (table->timestamp_field && // Don't set timestamp if used
table->timestamp_field->query_id == thd->query_id)
-*(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_INSERT;
+clear_timestamp_auto_bits(table->timestamp_field_type,
+TIMESTAMP_AUTO_SET_ON_INSERT);
}
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -216,7 +218,8 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
{
/* Don't set timestamp column if this is modified. */
if (table->timestamp_field->query_id == thd->query_id)
-*(int*)&table->timestamp_field_type&= ~ (int) TIMESTAMP_AUTO_SET_ON_UPDATE;
+clear_timestamp_auto_bits(table->timestamp_field_type,
+TIMESTAMP_AUTO_SET_ON_UPDATE);
else
table->timestamp_field->query_id= timestamp_query_id;
}
......
@@ -2268,9 +2268,11 @@ mysql_execute_command(THD *thd)
A better approach would be to reset this for any commands
that is not a SHOW command or a select that only access local
variables, but for now this is probably good enough.
+Don't reset warnings when executing a stored routine.
*/
-if (all_tables || &lex->select_lex != lex->all_selects_list ||
-lex->spfuns.records || lex->spprocs.records)
+if ((all_tables || &lex->select_lex != lex->all_selects_list ||
+lex->spfuns.records || lex->spprocs.records) &&
+!thd->spcont)
mysql_reset_errors(thd, 0);
#ifdef HAVE_REPLICATION
......
@@ -71,18 +71,22 @@ typedef struct st_filesort_info

/*
-Values in this enum are used to indicate during which operations value
-of TIMESTAMP field should be set to current timestamp.
-WARNING: The values are used for bit operations. If you change the enum,
-you must keep the bitwise relation of the values. For example:
-(int) TIMESTAMP_AUTO_SET_ON_BOTH ==
-(int) TIMESTAMP_AUTO_SET_ON_INSERT | (int) TIMESTAMP_AUTO_SET_ON_UPDATE.
+Values in this enum are used to indicate how a tables TIMESTAMP field
+should be treated. It can be set to the current timestamp on insert or
+update or both.
+WARNING: The values are used for bit operations. If you change the
+enum, you must keep the bitwise relation of the values. For example:
+(int) TIMESTAMP_AUTO_SET_ON_BOTH must be equal to
+(int) TIMESTAMP_AUTO_SET_ON_INSERT | (int) TIMESTAMP_AUTO_SET_ON_UPDATE.
+We use an enum here so that the debugger can display the value names.
*/
enum timestamp_auto_set_type
{
TIMESTAMP_NO_AUTO_SET= 0, TIMESTAMP_AUTO_SET_ON_INSERT= 1,
TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3
};

+#define clear_timestamp_auto_bits(_target_, _bits_) \
+(_target_)= (enum timestamp_auto_set_type)((int)(_target_) & ~(int)(_bits_))
+
class Field_timestamp;
class Field_blob;
......
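
A note on the clear_timestamp_auto_bits() macro added above: timestamp_field_type is an enum, so its bits cannot be cleared with &= without a cast, and the macro replaces the earlier *(int*)& pointer cast used in sql_insert.cc. Below is a small standalone illustration; the enum and macro are copied from the hunk above, and main() is added only for the example.

#include <cassert>

enum timestamp_auto_set_type
{
  TIMESTAMP_NO_AUTO_SET= 0, TIMESTAMP_AUTO_SET_ON_INSERT= 1,
  TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3
};

// Clears the requested bits and casts the result back to the enum type.
#define clear_timestamp_auto_bits(_target_, _bits_) \
  (_target_)= (enum timestamp_auto_set_type)((int)(_target_) & ~(int)(_bits_))

int main()
{
  timestamp_auto_set_type t= TIMESTAMP_AUTO_SET_ON_BOTH;
  clear_timestamp_auto_bits(t, TIMESTAMP_AUTO_SET_ON_INSERT);
  assert(t == TIMESTAMP_AUTO_SET_ON_UPDATE);   // only the UPDATE bit is left
  return 0;
}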