nexedi / MariaDB

Commit c1d6b29c authored Apr 25, 2005 by unknown

Merge spetrunia@bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/home/psergey/mysql-5.0-bug8490-2

parents 1bfb1070 c153e612
Showing 14 changed files with 256 additions and 115 deletions
mysql-test/r/create.result                     +4   -4
mysql-test/r/ps_1general.result                +1   -1
mysql-test/r/variables.result                  +2   -2
ndb/src/kernel/blocks/ERROR_codes.txt          +2   -1
ndb/src/kernel/blocks/dbtup/Dbtup.hpp          +8   -0
ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp   +41  -39
ndb/src/kernel/blocks/dbtup/Notes.txt          +20  -5
ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp      +11  -0
ndb/src/ndbapi/ndberror.c                      +3   -3
ndb/test/ndbapi/testOIBasic.cpp                +130 -50
scripts/mysql_tableinfo.sh                     +25  -7
sql/handler.cc                                 +2   -2
sql/log.cc                                     +7   -0
sql/set_var.cc                                 +0   -1
mysql-test/r/create.result
...
...
@@ -210,7 +210,7 @@ drop table if exists t1;
 SET SESSION storage_engine="heap";
 SELECT @@storage_engine;
 @@storage_engine
-HEAP
+MEMORY
 CREATE TABLE t1 (a int not null);
 show create table t1;
 Table	Create Table
...
...
@@ -222,7 +222,7 @@ SET SESSION storage_engine="gemini";
 ERROR 42000: Unknown table engine 'gemini'
 SELECT @@storage_engine;
 @@storage_engine
-HEAP
+MEMORY
 CREATE TABLE t1 (a int not null);
 show create table t1;
 Table	Create Table
...
...
@@ -371,7 +371,7 @@ drop database mysqltest;
 SET SESSION storage_engine="heap";
 SELECT @@storage_engine;
 @@storage_engine
-HEAP
+MEMORY
 CREATE TABLE t1 (a int not null);
 show create table t1;
 Table	Create Table
...
...
@@ -383,7 +383,7 @@ SET SESSION storage_engine="gemini";
 ERROR 42000: Unknown table engine 'gemini'
 SELECT @@storage_engine;
 @@storage_engine
-HEAP
+MEMORY
 CREATE TABLE t1 (a int not null);
 show create table t1;
 Table	Create Table
...
...
mysql-test/r/ps_1general.result
...
...
@@ -322,8 +322,8 @@ prepare stmt4 from ' show storage engines ';
 execute stmt4;
 Engine	Support	Comment
 MyISAM	YES/NO	Default engine as of MySQL 3.23 with great performance
-HEAP	YES/NO	Alias for MEMORY
 MEMORY	YES/NO	Hash based, stored in memory, useful for temporary tables
+HEAP	YES/NO	Alias for MEMORY
 MERGE	YES/NO	Collection of identical MyISAM tables
 MRG_MYISAM	YES/NO	Alias for MERGE
 ISAM	YES/NO	Obsolete storage engine, now replaced by MyISAM
...
...
mysql-test/r/variables.result
...
...
@@ -148,7 +148,7 @@ timed_mutexes OFF
 set storage_engine=MYISAM, storage_engine="HEAP", global storage_engine="MERGE";
 show local variables like 'storage_engine';
 Variable_name	Value
-storage_engine	HEAP
+storage_engine	MEMORY
 show global variables like 'storage_engine';
 Variable_name	Value
 storage_engine	MERGE
...
...
@@ -254,7 +254,7 @@ set storage_engine=MERGE, big_tables=2;
 ERROR 42000: Variable 'big_tables' can't be set to the value of '2'
 show local variables like 'storage_engine';
 Variable_name	Value
-storage_engine	HEAP
+storage_engine	MEMORY
 set SESSION query_cache_size=10000;
 ERROR HY000: Variable 'query_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
 set GLOBAL storage_engine=DEFAULT;
...
...
ndb/src/kernel/blocks/ERROR_codes.txt
...
...
@@ -10,7 +10,7 @@ Next DBTC 8035
 Next CMVMI 9000
 Next BACKUP 10022
 Next DBUTIL 11002
-Next DBTUX 12007
+Next DBTUX 12008
 Next SUMA 13001
 TESTING NODE FAILURE, ARBITRATION
...
...
@@ -443,6 +443,7 @@ Test routing of signals:
 Ordered index:
 --------------
+12007: Make next alloc node fail with no memory error
 Dbdict:
 -------
...
...
ndb/src/kernel/blocks/dbtup/Dbtup.hpp
...
...
@@ -1779,6 +1779,10 @@ private:
                              Operationrec* const regOperPtr,
                              Tablerec* const regTabPtr);
+  int addTuxEntries(Signal* signal,
+                    Operationrec* regOperPtr,
+                    Tablerec* regTabPtr);
   // these crash the node on error
   void executeTuxCommitTriggers(Signal* signal,
...
...
@@ -1789,6 +1793,10 @@ private:
                                  Operationrec* regOperPtr,
                                  Tablerec* const regTabPtr);
+  void removeTuxEntries(Signal* signal,
+                        Operationrec* regOperPtr,
+                        Tablerec* regTabPtr);
// *****************************************************************
// Error Handling routines.
// *****************************************************************
...
...
ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
...
...
@@ -973,25 +973,7 @@ Dbtup::executeTuxInsertTriggers(Signal* signal,
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpAdd;
-  // loop over index list
-  const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
-  TriggerPtr triggerPtr;
-  triggerList.first(triggerPtr);
-  while (triggerPtr.i != RNIL) {
-    ljam();
-    req->indexId = triggerPtr.p->indexId;
-    req->errorCode = RNIL;
-    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
-                   signal, TuxMaintReq::SignalLength);
-    ljamEntry();
-    if (req->errorCode != 0) {
-      ljam();
-      terrorCode = req->errorCode;
-      return -1;
-    }
-    triggerList.next(triggerPtr);
-  }
-  return 0;
+  return addTuxEntries(signal, regOperPtr, regTabPtr);
 }
 
 int
...
...
@@ -1012,9 +994,18 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal,
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpAdd;
-  // loop over index list
+  return addTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+int
+Dbtup::addTuxEntries(Signal* signal,
+                     Operationrec* regOperPtr,
+                     Tablerec* regTabPtr)
+{
+  TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
   TriggerPtr triggerPtr;
+  Uint32 failPtrI;
   triggerList.first(triggerPtr);
   while (triggerPtr.i != RNIL) {
     ljam();
...
...
@@ -1026,11 +1017,29 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal,
     if (req->errorCode != 0) {
       ljam();
       terrorCode = req->errorCode;
-      return -1;
+      failPtrI = triggerPtr.i;
+      goto fail;
     }
     triggerList.next(triggerPtr);
   }
   return 0;
+fail:
+  req->opInfo = TuxMaintReq::OpRemove;
+  triggerList.first(triggerPtr);
+  while (triggerPtr.i != failPtrI) {
+    ljam();
+    req->indexId = triggerPtr.p->indexId;
+    req->errorCode = RNIL;
+    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+                   signal, TuxMaintReq::SignalLength);
+    ljamEntry();
+    ndbrequire(req->errorCode == 0);
+    triggerList.next(triggerPtr);
+  }
+#ifdef VM_TRACE
+  ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl;
+#endif
+  return -1;
 }
 
 int
...
...
@@ -1049,7 +1058,6 @@ Dbtup::executeTuxCommitTriggers(Signal* signal,
 {
   TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   // get version
   // XXX could add prevTupVersion to Operationrec
   Uint32 tupVersion;
   if (regOperPtr->optype == ZINSERT) {
     if (! regOperPtr->deleteInsertFlag)
...
@@ -1087,21 +1095,7 @@ Dbtup::executeTuxCommitTriggers(Signal* signal,
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpRemove;
-  // loop over index list
-  const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
-  TriggerPtr triggerPtr;
-  triggerList.first(triggerPtr);
-  while (triggerPtr.i != RNIL) {
-    ljam();
-    req->indexId = triggerPtr.p->indexId;
-    req->errorCode = RNIL;
-    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
-                   signal, TuxMaintReq::SignalLength);
-    ljamEntry();
-    // commit must succeed
-    ndbrequire(req->errorCode == 0);
-    triggerList.next(triggerPtr);
-  }
+  removeTuxEntries(signal, regOperPtr, regTabPtr);
 }
 
 void
...
...
@@ -1132,7 +1126,15 @@ Dbtup::executeTuxAbortTriggers(Signal* signal,
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpRemove;
-  // loop over index list
+  removeTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+void
+Dbtup::removeTuxEntries(Signal* signal,
+                        Operationrec* regOperPtr,
+                        Tablerec* regTabPtr)
+{
+  TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
   TriggerPtr triggerPtr;
   triggerList.first(triggerPtr);
...
...
@@ -1143,7 +1145,7 @@ Dbtup::executeTuxAbortTriggers(Signal* signal,
     EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
                    signal, TuxMaintReq::SignalLength);
     ljamEntry();
-    // abort must succeed
+    // must succeed
     ndbrequire(req->errorCode == 0);
     triggerList.next(triggerPtr);
   }
...
...
ndb/src/kernel/blocks/dbtup/Notes.txt
...
...
@@ -135,6 +135,24 @@ abort DELETE none -
1) alternatively, store prevTupVersion in operation record.
Abort from ordered index error
------------------------------
Obviously, index update failure causes operation failure.
The operation is then aborted later by TC.
The problem here is with multiple indexes. Some may have been
updated successfully before the one that failed. Therefore
the trigger code aborts the successful ones already in
the prepare phase.
In other words, multiple indexes are treated as one.
Abort from any cause
--------------------
[ hairy stuff ]
Read attributes, query status
-----------------------------
...
...
@@ -170,14 +188,11 @@ used to decide if the scan can see the tuple.
This signal may also be called during any phase since commit/abort
of all operations is not done in one time-slice.
Commit and abort
----------------
[ hairy stuff ]
Problems
--------
Current abort code can destroy a tuple version too early. This
happens in test case "ticuur" (insert-commit-update-update-rollback),
if abort of first update arrives before abort of second update.
vim: set textwidth=68:
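The "multiple indexes are treated as one" behaviour described above is what addTuxEntries in DbtupTrigger.cpp implements: on the first index that fails, the indexes already updated in the prepare phase are undone before the error is reported. A minimal stand-alone sketch of that pattern, using generic names rather than the NDB types or the TUX_MAINT_REQ signal interface:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for the per-index maintenance call; in the real
// block this work goes through TUX_MAINT_REQ signals to DBTUX.
struct Index { int id; };

static bool updateIndex(const Index& ix)    { return ix.id != 3; } // pretend index 3 fails
static void removeFromIndex(const Index& ix) { std::printf("undo index %d\n", ix.id); }

// Apply the update to every index or to none: on the first failure,
// undo the indexes already done in this prepare phase and report failure.
static bool addAllEntries(const std::vector<Index>& list)
{
  for (std::size_t i = 0; i < list.size(); i++) {
    if (!updateIndex(list[i])) {
      for (std::size_t j = 0; j < i; j++)  // roll back the successful ones
        removeFromIndex(list[j]);
      return false;                        // operation fails, TC aborts it later
    }
  }
  return true;
}

int main()
{
  std::vector<Index> indexes = { {1}, {2}, {3}, {4} };
  std::printf("result: %s\n", addAllEntries(indexes) ? "ok" : "aborted");
  return 0;
}
```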
ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
...
...
@@ -23,6 +23,11 @@
 int
 Dbtux::allocNode(Signal* signal, NodeHandle& node)
 {
+  if (ERROR_INSERTED(12007)) {
+    jam();
+    CLEAR_ERROR_INSERT_VALUE;
+    return TuxMaintReq::NoMemError;
+  }
   Frag& frag = node.m_frag;
   Uint32 pageId = NullTupLoc.getPageId();
   Uint32 pageOffset = NullTupLoc.getPageOffset();
...
...
@@ -34,6 +39,12 @@ Dbtux::allocNode(Signal* signal, NodeHandle& node)
     node.m_loc = TupLoc(pageId, pageOffset);
     node.m_node = reinterpret_cast<TreeNode*>(node32);
     ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
+  } else {
+    switch (errorCode) {
+    case 827:
+      errorCode = TuxMaintReq::NoMemError;
+      break;
+    }
   }
   return errorCode;
 }
...
...
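The new ERROR_INSERTED(12007) branch follows the usual NDB error-injection pattern: a test arms an error-insert value in the block, and the next pass through the guarded code path simulates the failure once and clears the value. A minimal generic sketch of that pattern, with hypothetical helpers standing in for the kernel macros:

```cpp
#include <cstdio>

// Hypothetical stand-in for the block-local error-insert state that the
// real ERROR_INSERTED()/CLEAR_ERROR_INSERT_VALUE macros manipulate.
static unsigned g_errorInsertValue = 0;

static void setErrorInsert(unsigned code) { g_errorInsertValue = code; }
static bool errorInserted(unsigned code)  { return g_errorInsertValue == code; }
static void clearErrorInsert()            { g_errorInsertValue = 0; }

enum { NoMemError = 827 };

// Mirrors the shape of Dbtux::allocNode(): fail once with "no memory"
// when error insert 12007 is armed, otherwise succeed.
static int allocNode()
{
  if (errorInserted(12007)) {
    clearErrorInsert();   // fire only once
    return NoMemError;    // simulated allocation failure
  }
  return 0;               // pretend the allocation succeeded
}

int main()
{
  setErrorInsert(12007);
  std::printf("first alloc: %d\n", allocNode());   // 827 (simulated failure)
  std::printf("second alloc: %d\n", allocNode());  // 0 (back to normal)
  return 0;
}
```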
ndb/src/ndbapi/ndberror.c
...
...
@@ -179,11 +179,11 @@ ErrorBundle ErrorCodes[] = {
*/
 { 623, IS, "623" },
 { 624, IS, "624" },
-{ 625, IS, "Out of memory in Ndb Kernel, index part (increase IndexMemory)" },
+{ 625, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
 { 640, IS, "Too many hash indexes (should not happen)" },
 { 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
-{ 827, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" },
-{ 902, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" },
+{ 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
+{ 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
 { 903, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
 { 904, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
 { 905, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
...
...
ndb/test/ndbapi/testOIBasic.cpp
...
...
@@ -164,6 +164,16 @@ irandom(unsigned n)
   return i;
 }
 
+static bool
+randompct(unsigned pct)
+{
+  if (pct == 0)
+    return false;
+  if (pct >= 100)
+    return true;
+  return urandom(100) < pct;
+}
+
 // log and error macros
 
 static NdbMutex* ndbout_mutex = NULL;
...
...
@@ -259,6 +269,8 @@ struct Par : public Opt {
   bool m_verify;
   // deadlock possible
   bool m_deadlock;
+  // abort percentage
+  unsigned m_abortpct;
   NdbOperation::LockMode m_lockmode;
   // ordered range scan
   bool m_ordered;
...
...
@@ -281,6 +293,7 @@ struct Par : public Opt {
     m_randomkey(false),
     m_verify(false),
     m_deadlock(false),
+    m_abortpct(0),
     m_lockmode(NdbOperation::LM_Read),
     m_ordered(false),
     m_descending(false)
   {
...
...
@@ -1143,7 +1156,7 @@ struct Con {
   NdbScanFilter* m_scanfilter;
   enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive };
   ScanMode m_scanmode;
-  enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther };
+  enum ErrType { ErrNone = 0, ErrDeadlock, ErrNospace, ErrOther };
   ErrType m_errtype;
   Con() :
     m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_indexop(0),
...
...
@@ -1172,7 +1185,7 @@ struct Con {
   int endFilter();
   int setFilter(int num, int cond, const void* value, unsigned len);
   int execute(ExecType t);
-  int execute(ExecType t, bool& deadlock);
+  int execute(ExecType t, bool& deadlock, bool& nospace);
   int readTuples(Par par);
   int readIndexTuples(Par par);
   int executeScan();
...
...
@@ -1354,17 +1367,21 @@ Con::execute(ExecType t)
 }
 
 int
-Con::execute(ExecType t, bool& deadlock)
+Con::execute(ExecType t, bool& deadlock, bool& nospace)
 {
   int ret = execute(t);
-  if (ret != 0) {
-    if (deadlock && m_errtype == ErrDeadlock) {
-      LL3("caught deadlock");
-      ret = 0;
-    }
-  }
+  if (ret != 0 && deadlock && m_errtype == ErrDeadlock) {
+    LL3("caught deadlock");
+    ret = 0;
+  } else {
+    deadlock = false;
+  }
+  if (ret != 0 && nospace && m_errtype == ErrNospace) {
+    LL3("caught nospace");
+    ret = 0;
+  } else {
+    nospace = false;
+  }
   CHK(ret == 0);
   return 0;
 }
...
...
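In the new signature the two bool arguments act as in/out flags: on input they say which errors the caller is willing to tolerate, on output they say which one actually occurred. A stand-alone model of that contract (executeModel and the test values are illustrative, not part of the testOIBasic class):

```cpp
#include <cstdio>

enum ExecType { Commit, Rollback };
enum ErrType { ErrNone, ErrDeadlock, ErrNospace, ErrOther };

// Hypothetical model of Con::execute(t, deadlock, nospace): tolerated
// errors are reported via the flags instead of failing the call.
static int executeModel(ExecType, ErrType err, bool& deadlock, bool& nospace)
{
  int ret = (err == ErrNone) ? 0 : -1;
  if (ret != 0 && deadlock && err == ErrDeadlock)
    ret = 0;            // tolerated: signal via the flag, not via the result
  else
    deadlock = false;
  if (ret != 0 && nospace && err == ErrNospace)
    ret = 0;
  else
    nospace = false;
  return ret;
}

int main()
{
  bool deadlock = true, nospace = true;   // caller tolerates both errors
  int ret = executeModel(Commit, ErrNospace, deadlock, nospace);
  std::printf("ret=%d deadlock=%d nospace=%d\n", ret, deadlock, nospace);
  // prints: ret=0 deadlock=0 nospace=1 -> caller stops cleanly on "out of space"
  return 0;
}
```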
@@ -1475,6 +1492,8 @@ Con::printerror(NdbOut& out)
       // 631 is new, occurs only on 4 db nodes, needs to be checked out
       if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499 || code == 631)
         m_errtype = ErrDeadlock;
+      if (code == 826 || code == 827 || code == 902)
+        m_errtype = ErrNospace;
     }
     if (m_op && m_op->getNdbError().code != 0) {
       LL0(++any << " op : error " << m_op->getNdbError());
...
...
@@ -2480,8 +2499,8 @@ struct Set {
   void dbsave(unsigned i);
   void calc(Par par, unsigned i, unsigned mask = 0);
   bool pending(unsigned i, unsigned mask) const;
-  void notpending(unsigned i);
-  void notpending(const Lst& lst);
+  void notpending(unsigned i, ExecType et = Commit);
+  void notpending(const Lst& lst, ExecType et = Commit);
   void dbdiscard(unsigned i);
   void dbdiscard(const Lst& lst);
   const Row& dbrow(unsigned i) const;
...
...
@@ -2620,26 +2639,30 @@ Set::pending(unsigned i, unsigned mask) const
 }
 
 void
-Set::notpending(unsigned i)
+Set::notpending(unsigned i, ExecType et)
 {
   assert(m_row[i] != 0);
   Row& row = *m_row[i];
-  if (row.m_pending == Row::InsOp) {
-    row.m_exist = true;
-  } else if (row.m_pending == Row::UpdOp) {
-    ;
-  } else if (row.m_pending == Row::DelOp) {
-    row.m_exist = false;
+  if (et == Commit) {
+    if (row.m_pending == Row::InsOp)
+      row.m_exist = true;
+    if (row.m_pending == Row::DelOp)
+      row.m_exist = false;
+  } else {
+    if (row.m_pending == Row::InsOp)
+      row.m_exist = false;
+    if (row.m_pending == Row::DelOp)
+      row.m_exist = true;
   }
   row.m_pending = Row::NoOp;
 }
 
 void
-Set::notpending(const Lst& lst)
+Set::notpending(const Lst& lst, ExecType et)
 {
   for (unsigned j = 0; j < lst.m_cnt; j++) {
     unsigned i = lst.m_arr[j];
-    notpending(i);
+    notpending(i, et);
   }
 }
...
...
@@ -2831,8 +2854,6 @@ Set::putval(unsigned i, bool force, unsigned n)
   return 0;
 }
 
-// verify
 int
 Set::verify(Par par, const Set& set2) const
 {
...
...
@@ -3213,14 +3234,20 @@ pkinsert(Par par)
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       bool deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      bool nospace = true;
+      ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+      CHK(con.execute(et, deadlock, nospace) == 0);
       con.closeTransaction();
       if (deadlock) {
         LL1("pkinsert: stop on deadlock [at 1]");
         return 0;
       }
+      if (nospace) {
+        LL1("pkinsert: cnt=" << j << " stop on nospace");
+        return 0;
+      }
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.unlock();
       lst.reset();
       CHK(con.startTransaction() == 0);
...
...
@@ -3228,14 +3255,20 @@ pkinsert(Par par)
   }
   if (lst.cnt() != 0) {
     bool deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    bool nospace = true;
+    ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+    CHK(con.execute(et, deadlock, nospace) == 0);
     con.closeTransaction();
     if (deadlock) {
       LL1("pkinsert: stop on deadlock [at 2]");
       return 0;
     }
+    if (nospace) {
+      LL1("pkinsert: end: stop on nospace");
+      return 0;
+    }
     set.lock();
-    set.notpending(lst);
+    set.notpending(lst, et);
     set.unlock();
   }
   return 0;
 }
...
...
@@ -3253,6 +3286,7 @@ pkupdate(Par par)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = !par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
...
...
@@ -3269,28 +3303,38 @@ pkupdate(Par par)
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      nospace = true;
+      ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+      CHK(con.execute(et, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("pkupdate: stop on deadlock [at 1]");
         break;
       }
+      if (nospace) {
+        LL1("pkupdate: cnt=" << j << " stop on nospace [at 1]");
+        break;
+      }
       con.closeTransaction();
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.dbdiscard(lst);
       set.unlock();
       lst.reset();
       CHK(con.startTransaction() == 0);
     }
   }
-  if (!deadlock && lst.cnt() != 0) {
+  if (!deadlock && !nospace && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    nospace = true;
+    ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+    CHK(con.execute(et, deadlock, nospace) == 0);
     if (deadlock) {
-      LL1("pkupdate: stop on deadlock [at 1]");
+      LL1("pkupdate: stop on deadlock [at 2]");
+    } else if (nospace) {
+      LL1("pkupdate: end: stop on nospace [at 2]");
     } else {
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.dbdiscard(lst);
       set.unlock();
     }
...
...
@@ -3309,6 +3353,7 @@ pkdelete(Par par)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = !par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
...
...
@@ -3323,27 +3368,31 @@ pkdelete(Par par)
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      nospace = true;
+      ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+      CHK(con.execute(et, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("pkdelete: stop on deadlock [at 1]");
         break;
       }
       con.closeTransaction();
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.unlock();
       lst.reset();
       CHK(con.startTransaction() == 0);
     }
   }
-  if (!deadlock && lst.cnt() != 0) {
+  if (!deadlock && !nospace && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    nospace = true;
+    ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+    CHK(con.execute(et, deadlock, nospace) == 0);
     if (deadlock) {
       LL1("pkdelete: stop on deadlock [at 2]");
     } else {
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.unlock();
     }
   }
...
...
@@ -3418,6 +3467,7 @@ hashindexupdate(Par par, const ITab& itab)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = !par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
...
...
@@ -3435,7 +3485,7 @@ hashindexupdate(Par par, const ITab& itab)
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      CHK(con.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("hashindexupdate: stop on deadlock [at 1]");
         break;
...
...
@@ -3451,9 +3501,9 @@ hashindexupdate(Par par, const ITab& itab)
   }
   if (!deadlock && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    CHK(con.execute(Commit, deadlock, nospace) == 0);
     if (deadlock) {
-      LL1("hashindexupdate: stop on deadlock [at 1]");
+      LL1("hashindexupdate: stop on deadlock [at 2]");
     } else {
       set.lock();
       set.notpending(lst);
...
...
@@ -3474,6 +3524,7 @@ hashindexdelete(Par par, const ITab& itab)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = !par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
...
...
@@ -3488,7 +3539,7 @@ hashindexdelete(Par par, const ITab& itab)
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      CHK(con.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("hashindexdelete: stop on deadlock [at 1]");
         break;
...
...
@@ -3503,7 +3554,7 @@ hashindexdelete(Par par, const ITab& itab)
   }
   if (!deadlock && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    CHK(con.execute(Commit, deadlock, nospace) == 0);
     if (deadlock) {
       LL1("hashindexdelete: stop on deadlock [at 2]");
     } else {
...
...
@@ -3875,6 +3926,7 @@ scanupdatetable(Par par)
   CHK(con2.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   while (1) {
     int ret;
     deadlock = par.m_deadlock;
...
...
@@ -3910,7 +3962,7 @@ scanupdatetable(Par par)
     set.unlock();
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con2.execute(Commit, deadlock) == 0);
+      CHK(con2.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("scanupdatetable: stop on deadlock [at 2]");
         goto out;
...
...
@@ -3927,7 +3979,7 @@ scanupdatetable(Par par)
     CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
     if (ret == 2 && lst.cnt() != 0) {
       deadlock = par.m_deadlock;
-      CHK(con2.execute(Commit, deadlock) == 0);
+      CHK(con2.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("scanupdatetable: stop on deadlock [at 3]");
         goto out;
...
...
@@ -3974,6 +4026,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
   CHK(con2.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   while (1) {
     int ret;
     deadlock = par.m_deadlock;
...
...
@@ -4009,7 +4062,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
     set.unlock();
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con2.execute(Commit, deadlock) == 0);
+      CHK(con2.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("scanupdateindex: stop on deadlock [at 2]");
         goto out;
...
...
@@ -4026,7 +4079,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
     CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
     if (ret == 2 && lst.cnt() != 0) {
       deadlock = par.m_deadlock;
-      CHK(con2.execute(Commit, deadlock) == 0);
+      CHK(con2.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("scanupdateindex: stop on deadlock [at 3]");
         goto out;
...
...
@@ -4094,6 +4147,10 @@ readverify(Par par)
   if (par.m_noverify)
     return 0;
   par.m_verify = true;
+  if (par.m_abortpct != 0) {
+    LL2("skip verify in this version"); // implement in 5.0 version
+    par.m_verify = false;
+  }
   par.m_lockmode = NdbOperation::LM_CommittedRead;
   CHK(pkread(par) == 0);
   CHK(scanreadall(par) == 0);
...
...
@@ -4106,6 +4163,10 @@ readverifyfull(Par par)
   if (par.m_noverify)
     return 0;
   par.m_verify = true;
+  if (par.m_abortpct != 0) {
+    LL2("skip verify in this version"); // implement in 5.0 version
+    par.m_verify = false;
+  }
   par.m_lockmode = NdbOperation::LM_CommittedRead;
   const Tab& tab = par.tab();
   if (par.m_no == 0) {
...
...
@@ -4457,11 +4518,11 @@ runstep(Par par, const char* fname, TFunc func, unsigned mode)
   for (n = 0; n < threads; n++) {
     LL4("start " << n);
     Thr& thr = *g_thrlist[n];
-    thr.m_par.m_tab = par.m_tab;
-    thr.m_par.m_set = par.m_set;
-    thr.m_par.m_tmr = par.m_tmr;
-    thr.m_par.m_lno = par.m_lno;
-    thr.m_par.m_slno = par.m_slno;
+    Par oldpar = thr.m_par;
+    // update parameters
+    thr.m_par = par;
+    thr.m_par.m_no = oldpar.m_no;
+    thr.m_par.m_con = oldpar.m_con;
     thr.m_func = func;
     thr.start();
   }
...
...
@@ -4590,6 +4651,24 @@ tbusybuild(Par par)
   return 0;
 }
 
+static int
+trollback(Par par)
+{
+  par.m_abortpct = 50;
+  RUNSTEP(par, droptable, ST);
+  RUNSTEP(par, createtable, ST);
+  RUNSTEP(par, invalidatetable, MT);
+  RUNSTEP(par, pkinsert, MT);
+  RUNSTEP(par, createindex, ST);
+  RUNSTEP(par, invalidateindex, MT);
+  RUNSTEP(par, readverify, ST);
+  for (par.m_slno = 0; par.m_slno < par.m_subloop; par.m_slno++) {
+    RUNSTEP(par, mixedoperations, MT);
+    RUNSTEP(par, readverify, ST);
+  }
+  return 0;
+}
+
 static int
 ttimebuild(Par par)
 {
...
...
@@ -4712,6 +4791,7 @@ tcaselist[] = {
   TCase("d", tpkopsread, "pk operations and scan reads"),
   TCase("e", tmixedops, "pk operations and scan operations"),
   TCase("f", tbusybuild, "pk operations and index build"),
+  TCase("g", trollback, "operations with random rollbacks"),
   TCase("t", ttimebuild, "time index build"),
   TCase("u", ttimemaint, "time index maintenance"),
   TCase("v", ttimescan, "time full scan table vs index on pk"),
...
...
scripts/mysql_tableinfo.sh
...
...
@@ -6,6 +6,14 @@ use DBI;
 =head1 NAME
 
+WARNING: MySQL versions 5.0 and above feature the INFORMATION_SCHEMA
+pseudo-database which contains always up-to-date metadata information
+about all tables. So instead of using this script one can now
+simply query the INFORMATION_SCHEMA.SCHEMATA, INFORMATION_SCHEMA.TABLES,
+INFORMATION_SCHEMA.COLUMNS, INFORMATION_SCHEMA.STATISTICS pseudo-tables.
+Please see the MySQL manual for more information about INFORMATION_SCHEMA.
+This script will be removed from the MySQL distribution in version 5.1.
+
 mysql_tableinfo - creates and populates information tables with
 the output of SHOW DATABASES, SHOW TABLES (or SHOW TABLE STATUS),
 SHOW COLUMNS and SHOW INDEX.
...
...
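For illustration, the metadata this script used to collect via SHOW TABLE STATUS can be read with an ordinary query against INFORMATION_SCHEMA; a minimal sketch using the MySQL C API (the connection parameters are placeholders, and this code is not part of the patch):

```cpp
#include <mysql.h>
#include <cstdio>

int main()
{
  MYSQL* conn = mysql_init(NULL);
  if (!mysql_real_connect(conn, "localhost", "user", "password",
                          NULL, 3306, NULL, 0)) {
    std::fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
    return 1;
  }
  // Roughly the per-table information mysql_tableinfo gathered, now
  // available as an ordinary table in MySQL 5.0 and above.
  const char* query =
    "SELECT TABLE_SCHEMA, TABLE_NAME, ENGINE, TABLE_ROWS "
    "FROM INFORMATION_SCHEMA.TABLES";
  if (mysql_query(conn, query) != 0) {
    std::fprintf(stderr, "query failed: %s\n", mysql_error(conn));
    mysql_close(conn);
    return 1;
  }
  MYSQL_RES* res = mysql_store_result(conn);
  MYSQL_ROW row;
  while ((row = mysql_fetch_row(res)) != NULL)
    std::printf("%s.%s engine=%s rows=%s\n",
                row[0], row[1],
                row[2] ? row[2] : "NULL",
                row[3] ? row[3] : "NULL");
  mysql_free_result(res);
  mysql_close(conn);
  return 0;
}
```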
@@ -62,6 +70,19 @@ GetOptions( \%opt,
"quiet|q"
,
)
or usage
(
"Invalid option"
)
;
if
(!
$opt
{
'quiet'
})
{
print
<<
EOF
WARNING: MySQL versions 5.0 and above feature the INFORMATION_SCHEMA
pseudo-database which contains always up-to-date metadata information
about all tables. So instead of using this script one can now
simply query the INFORMATION_SCHEMA.SCHEMATA, INFORMATION_SCHEMA.TABLES,
INFORMATION_SCHEMA.COLUMNS, INFORMATION_SCHEMA.STATISTICS pseudo-tables.
Please see the MySQL manual for more information about INFORMATION_SCHEMA.
This script will be removed from the MySQL distribution in version 5.1.
EOF
}
if
(
$opt
{
'help'
})
{
usage
()
;
}
my
(
$db_to_write
,
$db_like_wild
,
$tbl_like_wild
)
;
...
...
@@ -104,7 +125,7 @@ $tbl_like_wild=$dbh->quote($tbl_like_wild);
 if (!$opt{'quiet'})
 {
-    print "\n!! This program is doing to do:\n\n";
+    print "\n!! This program is going to do:\n\n";
     print "**DROP** TABLE ...\n" if ($opt{'clear'} or $opt{'clear-only'});
     print "**DELETE** FROM ... WHERE `Database` LIKE $db_like_wild AND `Table` LIKE $tbl_like_wild
 **INSERT** INTO ...
...
...
@@ -456,17 +477,14 @@ UNIX domain socket to use when connecting to server
 =head1 WARRANTY
 
-This software is free and comes without warranty of any kind. You
-should never trust backup software without studying the code yourself.
-Study the code inside this script and only rely on it if I<you> believe
-that it does the right thing for you.
+This software is free and comes without warranty of any kind.
 
 Patches adding bug fixes, documentation and new features are welcome.
 
 =head1 TO DO
 
-Use extended inserts to be faster (for servers with many databases
-or tables). But to do that, must care about net-buffer-length.
+Nothing: starting from MySQL 5.0, this program is replaced by the
+INFORMATION_SCHEMA pseudo-database.
 
 =head1 AUTHOR
...
...
sql/handler.cc
...
...
@@ -70,10 +70,10 @@ struct show_table_type_st sys_table_types[]=
 {
   {"MyISAM", &have_yes,
    "Default engine as of MySQL 3.23 with great performance", DB_TYPE_MYISAM},
-  {"HEAP", &have_yes,
-   "Alias for MEMORY", DB_TYPE_HEAP},
   {"MEMORY", &have_yes,
    "Hash based, stored in memory, useful for temporary tables", DB_TYPE_HEAP},
+  {"HEAP", &have_yes,
+   "Alias for MEMORY", DB_TYPE_HEAP},
   {"MERGE", &have_yes,
    "Collection of identical MyISAM tables", DB_TYPE_MRG_MYISAM},
   {"MRG_MYISAM", &have_yes,
...
...
sql/log.cc
...
...
@@ -2857,6 +2857,13 @@ int TC_LOG_BINLOG::open(const char *opt_name)
   pthread_mutex_init(&LOCK_prep_xids, MY_MUTEX_INIT_FAST);
   pthread_cond_init(&COND_prep_xids, 0);
 
+  if (!my_b_inited(&index_file))
+  {
+    /* There was a failure to open the index file, can't open the binlog */
+    cleanup();
+    return 1;
+  }
+
   if (using_heuristic_recover())
   {
     /* generate a new binlog to mask a corrupted one */
...
...
sql/set_var.cc
...
...
@@ -847,7 +847,6 @@ struct show_var_st init_vars[]= {
   {"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL},
 #endif
   {"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL},
-  {"log_update", (char*) &opt_update_log, SHOW_BOOL},
   {sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS},
   {sys_long_query_time.name, (char*) &sys_long_query_time, SHOW_SYS},
   {sys_low_priority_updates.name, (char*) &sys_low_priority_updates, SHOW_SYS},
...
...