Commit be4db499 authored Mar 02, 2007 by lzhou/zhl@dev3-63.(none)
BUG#26307 correct inconsistent jam() and ljam() use in DBTUP source files.
parent 2248cd3d

Showing 18 changed files with 604 additions and 556 deletions (+604 −556)
storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp                 +93   -16
storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp            +32   -34
storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp           +16   -18
storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp           +20   -22
storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp            +6    -8
storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp        +1    -0
storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp         +5    -7
storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp              +40   -42
storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp            +34   -36
storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp             +47   -49
storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp           +26   -28
storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp          +38   -40
storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp         +113  -115
storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp             +1    -0
storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp    +12   -14
storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp        +18   -20
storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp          +85   -87
storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp         +17   -20
storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -32,6 +32,82 @@
#include <../pgman.hpp>
#include <../tsman.hpp>
// jams
#undef jam
#undef jamEntry
#ifdef DBTUP_BUFFER_CPP
#define jam() jamLine(10000 + __LINE__)
#define jamEntry() jamEntryLine(10000 + __LINE__)
#endif
#ifdef DBTUP_ROUTINES_CPP
#define jam() jamLine(15000 + __LINE__)
#define jamEntry() jamEntryLine(15000 + __LINE__)
#endif
#ifdef DBTUP_COMMIT_CPP
#define jam() jamLine(20000 + __LINE__)
#define jamEntry() jamEntryLine(20000 + __LINE__)
#endif
#ifdef DBTUP_FIXALLOC_CPP
#define jam() jamLine(25000 + __LINE__)
#define jamEntry() jamEntryLine(25000 + __LINE__)
#endif
#ifdef DBTUP_TRIGGER_CPP
#define jam() jamLine(30000 + __LINE__)
#define jamEntry() jamEntryLine(30000 + __LINE__)
#endif
#ifdef DBTUP_ABORT_CPP
#define jam() jamLine(35000 + __LINE__)
#define jamEntry() jamEntryLine(35000 + __LINE__)
#endif
#ifdef DBTUP_PAGE_MAP_CPP
#define jam() jamLine(40000 + __LINE__)
#define jamEntry() jamEntryLine(40000 + __LINE__)
#endif
#ifdef DBTUP_PAG_MAN_CPP
#define jam() jamLine(45000 + __LINE__)
#define jamEntry() jamEntryLine(45000 + __LINE__)
#endif
#ifdef DBTUP_STORE_PROC_DEF_CPP
#define jam() jamLine(50000 + __LINE__)
#define jamEntry() jamEntryLine(50000 + __LINE__)
#endif
#ifdef DBTUP_META_CPP
#define jam() jamLine(55000 + __LINE__)
#define jamEntry() jamEntryLine(55000 + __LINE__)
#endif
#ifdef DBTUP_TAB_DES_MAN_CPP
#define jam() jamLine(60000 + __LINE__)
#define jamEntry() jamEntryLine(60000 + __LINE__)
#endif
#ifdef DBTUP_GEN_CPP
#define jam() jamLine(65000 + __LINE__)
#define jamEntry() jamEntryLine(65000 + __LINE__)
#endif
#ifdef DBTUP_INDEX_CPP
#define jam() jamLine(70000 + __LINE__)
#define jamEntry() jamEntryLine(70000 + __LINE__)
#endif
#ifdef DBTUP_DEBUG_CPP
#define jam() jamLine(75000 + __LINE__)
#define jamEntry() jamEntryLine(75000 + __LINE__)
#endif
#ifdef DBTUP_VAR_ALLOC_CPP
#define jam() jamLine(80000 + __LINE__)
#define jamEntry() jamEntryLine(80000 + __LINE__)
#endif
#ifdef DBTUP_SCAN_CPP
#define jam() jamLine(85000 + __LINE__)
#define jamEntry() jamEntryLine(85000 + __LINE__)
#endif
#ifdef DBTUP_DISK_ALLOC_CPP
#define jam() jamLine(90000 + __LINE__)
#define jamEntry() jamEntryLine(90000 + __LINE__)
#endif
#ifndef jam
#define jam() jamLine(__LINE__)
#define jamEntry() jamEntryLine(__LINE__)
#endif
#ifdef VM_TRACE
inline const char* dbgmask(const Bitmask<MAXNROFATTRIBUTESINWORDS>& bm) {
  static int i = 0;
  static char buf[5][200];
...
...
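For illustration only, the pattern the header now expects from every DBTUP translation unit can be sketched as a small standalone program. jamLine()/jamEntryLine() below are stand-in stubs, not the real NDB trace hooks, and the file tag and offset simply follow the scheme shown in the block above:

#include <cstdio>

// Stand-ins for the real jamLine()/jamEntryLine() trace hooks (assumption).
inline void jamLine(int line)      { std::printf("jam %d\n", line); }
inline void jamEntryLine(int line) { std::printf("jamEntry %d\n", line); }

// A .cpp file such as DbtupAbort.cpp defines its tag before including
// Dbtup.hpp; the header then picks the per-file offset, as above.
#define DBTUP_ABORT_CPP

#ifdef DBTUP_ABORT_CPP
#define jam() jamLine(35000 + __LINE__)
#define jamEntry() jamEntryLine(35000 + __LINE__)
#endif
#ifndef jam
#define jam() jamLine(__LINE__)          // fallback, as in the real header
#define jamEntry() jamEntryLine(__LINE__)
#endif

int main() {
  jamEntry();   // records 35000 + the line number of this call
  jam();
  return 0;
}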
@@ -70,22 +146,23 @@ inline const Uint32* ALIGN_WORD(const void* ptr)
// only reports the line number in the file it currently is located in.
//
// DbtupExecQuery.cpp 0
-// DbtupBuffer.cpp            2000
-// DbtupRoutines.cpp          3000
-// DbtupCommit.cpp            5000
-// DbtupFixAlloc.cpp          6000
-// DbtupTrigger.cpp           7000
-// DbtupAbort.cpp             9000
-// DbtupPageMap.cpp          14000
-// DbtupPagMan.cpp           16000
-// DbtupStoredProcDef.cpp    18000
-// DbtupMeta.cpp             20000
-// DbtupTabDesMan.cpp        22000
-// DbtupGen.cpp              24000
-// DbtupIndex.cpp            28000
-// DbtupDebug.cpp            30000
-// DbtupVarAlloc.cpp         32000
-// DbtupScan.cpp             33000
+// DbtupBuffer.cpp           10000
+// DbtupRoutines.cpp         15000
+// DbtupCommit.cpp           20000
+// DbtupFixAlloc.cpp         25000
+// DbtupTrigger.cpp          30000
+// DbtupAbort.cpp            35000
+// DbtupPageMap.cpp          40000
+// DbtupPagMan.cpp           45000
+// DbtupStoredProcDef.cpp    50000
+// DbtupMeta.cpp             55000
+// DbtupTabDesMan.cpp        60000
+// DbtupGen.cpp              65000
+// DbtupIndex.cpp            70000
+// DbtupDebug.cpp            75000
+// DbtupVarAlloc.cpp         80000
+// DbtupScan.cpp             85000
+// DbtupDiskAlloc.cpp        90000
//------------------------------------------------------------------
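A hedged sketch of how a recorded jam() value can be mapped back to a file and source line, assuming every DBTUP file stays below the 5000-line spacing used by the offsets above (the decoder below is illustrative only and not part of this commit):

#include <cstdio>

// Hypothetical helper: map the offset component of a jam() value back to the
// DBTUP source file, using the table documented above.
static const char* dbtupFile(int offset) {
  switch (offset) {
  case 0:     return "DbtupExecQuery.cpp";
  case 10000: return "DbtupBuffer.cpp";
  case 15000: return "DbtupRoutines.cpp";
  case 20000: return "DbtupCommit.cpp";
  case 25000: return "DbtupFixAlloc.cpp";
  case 30000: return "DbtupTrigger.cpp";
  case 35000: return "DbtupAbort.cpp";
  case 40000: return "DbtupPageMap.cpp";
  case 45000: return "DbtupPagMan.cpp";
  case 50000: return "DbtupStoredProcDef.cpp";
  case 55000: return "DbtupMeta.cpp";
  case 60000: return "DbtupTabDesMan.cpp";
  case 65000: return "DbtupGen.cpp";
  case 70000: return "DbtupIndex.cpp";
  case 75000: return "DbtupDebug.cpp";
  case 80000: return "DbtupVarAlloc.cpp";
  case 85000: return "DbtupScan.cpp";
  case 90000: return "DbtupDiskAlloc.cpp";
  default:    return "unknown";
  }
}

int main() {
  int jamValue = 35042;                     // example value from a jam trace
  int offset   = (jamValue / 5000) * 5000;  // file component
  int line     = jamValue % 5000;           // line component
  std::printf("%s:%d\n", dbtupFile(offset), line);
  return 0;
}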
storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -14,21 +14,19 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_ABORT_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
 #include <pc.hpp>

-#define ljam() { jamLine(9000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(9000 + __LINE__); }
-
 void Dbtup::freeAllAttrBuffers(Operationrec* const regOperPtr)
 {
   if (regOperPtr->storedProcedureId == RNIL) {
-    ljam();
+    jam();
     freeAttrinbufrec(regOperPtr->firstAttrinbufrec);
   } else {
-    ljam();
+    jam();
     StoredProcPtr storedPtr;
     c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId);
     ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);
@@ -46,7 +44,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf)
   Uint32 RnoFree = cnoFreeAttrbufrec;
   localAttrBufPtr.i = anAttrBuf;
   while (localAttrBufPtr.i != RNIL) {
-    ljam();
+    jam();
     ptrCheckGuard(localAttrBufPtr, cnoOfAttrbufrec, attrbufrec);
     Ttemp = localAttrBufPtr.p->attrbuf[ZBUF_NEXT];
     localAttrBufPtr.p->attrbuf[ZBUF_NEXT] = cfirstfreeAttrbufrec;
@@ -62,7 +60,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf)
 */
 void Dbtup::execTUP_ABORTREQ(Signal* signal)
 {
-  ljamEntry();
+  jamEntry();
   do_tup_abortreq(signal, 0);
 }
@@ -80,7 +78,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
              (trans_state == TRANS_ERROR_WAIT_TUPKEYREQ) ||
              (trans_state == TRANS_IDLE));
   if (regOperPtr.p->op_struct.op_type == ZREAD) {
-    ljam();
+    jam();
     freeAllAttrBuffers(regOperPtr.p);
     initOpConnection(regOperPtr.p);
     return;
@@ -94,7 +92,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
   if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED) {
-    ljam();
+    jam();
     if (!regTabPtr.p->tuxCustomTriggers.isEmpty() &&
        (flags & ZSKIP_TUX_TRIGGERS) == 0)
      executeTuxAbortTriggers(signal,
@@ -105,12 +103,12 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
     OperationrecPtr loopOpPtr;
     loopOpPtr.i = regOperPtr.p->nextActiveOp;
     while (loopOpPtr.i != RNIL) {
-      ljam();
+      jam();
       c_operation_pool.getPtr(loopOpPtr);
       if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED &&
          !regTabPtr.p->tuxCustomTriggers.isEmpty() &&
          (flags & ZSKIP_TUX_TRIGGERS) == 0) {
-        ljam();
+        jam();
         executeTuxAbortTriggers(signal,
                                 loopOpPtr.p,
                                 regFragPtr.p,
@@ -211,116 +209,116 @@ int Dbtup::TUPKEY_abort(Signal* signal, int error_type)
   case 1:
     //tmupdate_alloc_error:
     terrorCode= ZMEM_NOMEM_ERROR;
-    ljam();
+    jam();
     break;
   case 15:
-    ljam();
+    jam();
     terrorCode = ZREGISTER_INIT_ERROR;
     break;
   case 16:
-    ljam();
+    jam();
     terrorCode = ZTRY_TO_UPDATE_ERROR;
     break;
   case 17:
-    ljam();
+    jam();
     terrorCode = ZNO_ILLEGAL_NULL_ATTR;
     break;
   case 19:
-    ljam();
+    jam();
     terrorCode = ZTRY_TO_UPDATE_ERROR;
     break;
   case 20:
-    ljam();
+    jam();
     terrorCode = ZREGISTER_INIT_ERROR;
     break;
   case 22:
-    ljam();
+    jam();
     terrorCode = ZTOTAL_LEN_ERROR;
     break;
   case 23:
-    ljam();
+    jam();
     terrorCode = ZREGISTER_INIT_ERROR;
     break;
   case 24:
-    ljam();
+    jam();
     terrorCode = ZREGISTER_INIT_ERROR;
     break;
   case 26:
-    ljam();
+    jam();
     terrorCode = ZREGISTER_INIT_ERROR;
     break;
   case 27:
-    ljam();
+    jam();
     terrorCode = ZREGISTER_INIT_ERROR;
     break;
   case 28:
-    ljam();
+    jam();
     terrorCode = ZREGISTER_INIT_ERROR;
     break;
   case 29:
-    ljam();
+    jam();
     break;
   case 30:
-    ljam();
+    jam();
     terrorCode = ZCALL_ERROR;
     break;
   case 31:
-    ljam();
+    jam();
     terrorCode = ZSTACK_OVERFLOW_ERROR;
     break;
   case 32:
-    ljam();
+    jam();
     terrorCode = ZSTACK_UNDERFLOW_ERROR;
     break;
   case 33:
-    ljam();
+    jam();
     terrorCode = ZNO_INSTRUCTION_ERROR;
     break;
   case 34:
-    ljam();
+    jam();
     terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR;
     break;
   case 35:
-    ljam();
+    jam();
     terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR;
     break;
   case 38:
-    ljam();
+    jam();
     terrorCode = ZTEMPORARY_RESOURCE_FAILURE;
     break;
   case 39:
     if (get_trans_state(operPtr.p) == TRANS_TOO_MUCH_AI) {
-      ljam();
+      jam();
       terrorCode = ZTOO_MUCH_ATTRINFO_ERROR;
     } else if (get_trans_state(operPtr.p) == TRANS_ERROR_WAIT_TUPKEYREQ) {
-      ljam();
+      jam();
       terrorCode = ZSEIZE_ATTRINBUFREC_ERROR;
     } else {
       ndbrequire(false);
     }//if
     break;
   case 40:
-    ljam();
+    jam();
     terrorCode = ZUNSUPPORTED_BRANCH;
     break;
   default:
storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
@@ -14,28 +14,26 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_BUFFER_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
 #include <pc.hpp>
 #include <signaldata/TransIdAI.hpp>

-#define ljam() { jamLine(2000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(2000 + __LINE__); }
-
 void Dbtup::execSEND_PACKED(Signal* signal)
 {
   Uint16 hostId;
   Uint32 i;
   Uint32 TpackedListIndex = cpackedListIndex;
-  ljamEntry();
+  jamEntry();
   for (i = 0; i < TpackedListIndex; i++) {
-    ljam();
+    jam();
     hostId = cpackedList[i];
     ndbrequire((hostId - 1) < (MAX_NODES - 1)); // Also check not zero
     Uint32 TpacketTA = hostBuffer[hostId].noOfPacketsTA;
     if (TpacketTA != 0) {
-      ljam();
+      jam();
       BlockReference TBref = numberToRef(API_PACKED, hostId);
       Uint32 TpacketLen = hostBuffer[hostId].packetLenTA;
       MEMCOPY_NO_WORDS(&signal->theData[0],
@@ -73,7 +71,7 @@ void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef,
     // There is still space in the buffer. We will copy it into the
     // buffer.
     // ----------------------------------------------------------------
-    ljam();
+    jam();
     updatePackedList(signal, hostId);
   } else if (false && TnoOfPackets == 1) {
     // ----------------------------------------------------------------
@@ -118,7 +116,7 @@ void Dbtup::updatePackedList(Signal* signal, Uint16 hostId)
 {
   if (hostBuffer[hostId].inPackedList == false) {
     Uint32 TpackedListIndex = cpackedListIndex;
-    ljam();
+    jam();
     hostBuffer[hostId].inPackedList = true;
     cpackedList[TpackedListIndex] = hostId;
     cpackedListIndex = TpackedListIndex + 1;
@@ -149,7 +147,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
   if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){
     // Use error insert to turn routing on
-    ljam();
+    jam();
     connectedToNode = false;
   }
@@ -167,18 +165,18 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
    * Own node -> execute direct
    */
   if (nodeId != getOwnNodeId()){
-    ljam();
+    jam();
     /**
      * Send long sig
      */
     if (ToutBufIndex >= 22 && is_api && !old_dest) {
-      ljam();
+      jam();
       /**
        * Flush buffer so that order is maintained
        */
       if (TpacketTA != 0) {
-        ljam();
+        jam();
         BlockReference TBref = numberToRef(API_PACKED, nodeId);
         MEMCOPY_NO_WORDS(&signal->theData[0],
                          &hostBuffer[nodeId].packetBufferTA[0],
@@ -202,7 +200,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
    */
 #ifndef NDB_NO_DROPPED_SIGNAL
   if (ToutBufIndex < 22 && is_api){
-    ljam();
+    jam();
     bufferTRANSID_AI(signal, recBlockref, 3 + ToutBufIndex);
     return;
   }
@@ -214,7 +212,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
     Uint32 *src = signal->theData + 25;
     if (ToutBufIndex >= 22){
       do {
-        ljam();
+        jam();
         MEMCOPY_NO_WORDS(&signal->theData[3], src, 22);
         sendSignal(recBlockref, GSN_TRANSID_AI, signal, 25, JBB);
         ToutBufIndex -= 22;
@@ -223,14 +221,14 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
     }
     if (ToutBufIndex > 0){
-      ljam();
+      jam();
       MEMCOPY_NO_WORDS(&signal->theData[3], src, ToutBufIndex);
       sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3 + ToutBufIndex, JBB);
     }
     return;
   }
   EXECUTE_DIRECT(block, GSN_TRANSID_AI, signal, 3 + ToutBufIndex);
-  ljamEntry();
+  jamEntry();
   return;
 }
@@ -242,7 +240,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
   Uint32 routeBlockref = req_struct->TC_ref;
   if (true){ // TODO is_api && !old_dest){
-    ljam();
+    jam();
     transIdAI->attrData[0] = recBlockref;
     LinearSectionPtr ptr[3];
     ptr[0].p = &signal->theData[25];
@@ -260,7 +258,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
     Uint32 sent = 0;
     Uint32 maxLen = TransIdAI::DataLength - 1;
     while (sent < tot) {
-      ljam();
+      jam();
       Uint32 dataLen = (tot - sent > maxLen) ? maxLen : tot - sent;
       Uint32 sigLen = dataLen + TransIdAI::HeaderLength + 1;
       MEMCOPY_NO_WORDS(&transIdAI->attrData,
storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -14,6 +14,7 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_COMMIT_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
@@ -21,16 +22,13 @@
 #include <signaldata/TupCommit.hpp>
 #include "../dblqh/Dblqh.hpp"

-#define ljam() { jamLine(5000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(5000 + __LINE__); }
-
 void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
 {
   TablerecPtr regTabPtr;
   FragrecordPtr regFragPtr;
   Uint32 frag_page_id, frag_id;

-  ljamEntry();
+  jamEntry();

   frag_id = signal->theData[0];
   regTabPtr.i = signal->theData[1];
@@ -62,7 +60,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
     if (regTabPtr.p->m_attributes[MM].m_no_of_varsize)
     {
-      ljam();
+      jam();
       free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr);
     } else {
       free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p);
@@ -78,7 +76,7 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal)
   Uint32 gci = signal->theData[1];
   c_operation_pool.getPtr(loopOpPtr);
   while (loopOpPtr.p->prevActiveOp != RNIL) {
-    ljam();
+    jam();
     loopOpPtr.i = loopOpPtr.p->prevActiveOp;
     c_operation_pool.getPtr(loopOpPtr);
   }
@@ -87,11 +85,11 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal)
     signal->theData[0] = loopOpPtr.p->userpointer;
     signal->theData[1] = gci;
     if (loopOpPtr.p->nextActiveOp == RNIL) {
-      ljam();
+      jam();
       EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2);
       return;
     }
-    ljam();
+    jam();
     EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2);
     jamEntry();
     loopOpPtr.i = loopOpPtr.p->nextActiveOp;
@@ -114,16 +112,16 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr,
   if (regOperPtr->op_struct.in_active_list) {
     regOperPtr->op_struct.in_active_list = false;
     if (regOperPtr->nextActiveOp != RNIL) {
-      ljam();
+      jam();
       raoOperPtr.i = regOperPtr->nextActiveOp;
       c_operation_pool.getPtr(raoOperPtr);
       raoOperPtr.p->prevActiveOp = regOperPtr->prevActiveOp;
     } else {
-      ljam();
+      jam();
       tuple_ptr->m_operation_ptr_i = regOperPtr->prevActiveOp;
     }
     if (regOperPtr->prevActiveOp != RNIL) {
-      ljam();
+      jam();
       raoOperPtr.i = regOperPtr->prevActiveOp;
       c_operation_pool.getPtr(raoOperPtr);
       raoOperPtr.p->nextActiveOp = regOperPtr->nextActiveOp;
@@ -343,7 +341,7 @@ Dbtup::disk_page_commit_callback(Signal* signal,
   Uint32 gci;
   OperationrecPtr regOperPtr;

-  ljamEntry();
+  jamEntry();

   c_operation_pool.getPtr(regOperPtr, opPtrI);
   c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci);
@@ -379,7 +377,7 @@ Dbtup::disk_page_log_buffer_callback(Signal* signal,
   Uint32 gci;
   OperationrecPtr regOperPtr;

-  ljamEntry();
+  jamEntry();

   c_operation_pool.getPtr(regOperPtr, opPtrI);
   c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci);
@@ -447,7 +445,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
   TupCommitReq* const tupCommitReq = (TupCommitReq*)signal->getDataPtr();

   regOperPtr.i = tupCommitReq->opPtr;
-  ljamEntry();
+  jamEntry();

   c_operation_pool.getPtr(regOperPtr);
   if (!regOperPtr.p->is_first_operation())
@@ -603,7 +601,7 @@ skip_disk:
    * why can't we instead remove "own version" (when approriate ofcourse)
    */
   if (!regTabPtr.p->tuxCustomTriggers.isEmpty()) {
-    ljam();
+    jam();
     OperationrecPtr loopPtr = regOperPtr;
     while (loopPtr.i != RNIL) {
@@ -656,18 +654,18 @@ Dbtup::set_change_mask_info(KeyReqStruct * const req_struct,
 {
   ChangeMaskState state = get_change_mask_state(regOperPtr);
   if (state == USE_SAVED_CHANGE_MASK) {
-    ljam();
+    jam();
     req_struct->changeMask.setWord(0, regOperPtr->saved_change_mask[0]);
     req_struct->changeMask.setWord(1, regOperPtr->saved_change_mask[1]);
   } else if (state == RECALCULATE_CHANGE_MASK) {
-    ljam();
+    jam();
     // Recompute change mask, for now set all bits
     req_struct->changeMask.set();
   } else if (state == SET_ALL_MASK) {
-    ljam();
+    jam();
     req_struct->changeMask.set();
   } else {
-    ljam();
+    jam();
     ndbrequire(state == DELETE_CHANGES);
     req_struct->changeMask.set();
   }
@@ -687,17 +685,17 @@ Dbtup::calculateChangeMask(Page* const pagePtr,
     ndbrequire(loopOpPtr.p->op_struct.op_type == ZUPDATE);
     ChangeMaskState change_mask = get_change_mask_state(loopOpPtr.p);
     if (change_mask == USE_SAVED_CHANGE_MASK) {
-      ljam();
+      jam();
       saved_word1 |= loopOpPtr.p->saved_change_mask[0];
       saved_word2 |= loopOpPtr.p->saved_change_mask[1];
     } else if (change_mask == RECALCULATE_CHANGE_MASK) {
-      ljam();
+      jam();
       //Recompute change mask, for now set all bits
       req_struct->changeMask.set();
       return;
     } else {
       ndbrequire(change_mask == SET_ALL_MASK);
-      ljam();
+      jam();
       req_struct->changeMask.set();
       return;
     }
storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
@@ -15,6 +15,7 @@
 #define DBTUP_C
+#define DBTUP_DEBUG_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
@@ -24,9 +25,6 @@
 #include <signaldata/EventReport.hpp>
 #include <Vector.hpp>

-#define ljam() { jamLine(30000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(30000 + __LINE__); }
-
 /* **************************************************************** */
 /* ---------------------------------------------------------------- */
 /* ------------------------ DEBUG MODULE -------------------------- */
@@ -35,7 +33,7 @@
 void Dbtup::execDEBUG_SIG(Signal* signal)
 {
   PagePtr regPagePtr;
-  ljamEntry();
+  jamEntry();
   regPagePtr.i = signal->theData[0];
   c_page_pool.getPtr(regPagePtr);
 }//Dbtup::execDEBUG_SIG()
@@ -248,18 +246,18 @@ void Dbtup::execMEMCHECKREQ(Signal* signal)
   PagePtr regPagePtr;
   Uint32* data = &signal->theData[0];

-  ljamEntry();
+  jamEntry();
   BlockReference blockref = signal->theData[0];
   Uint32 i;
   for (i = 0; i < 25; i++) {
-    ljam();
+    jam();
     data[i] = 0;
   }//for
   for (i = 0; i < 16; i++) {
     regPagePtr.i = cfreepageList[i];
-    ljam();
+    jam();
     while (regPagePtr.i != RNIL) {
-      ljam();
+      jam();
       ptrCheckGuard(regPagePtr, cnoOfPage, cpage);
       regPagePtr.i = regPagePtr.p->next_page;
       data[0]++;
storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
@@ -14,6 +14,7 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_DISK_ALLOC_CPP
 #include "Dbtup.hpp"

 static bool f_undo_done = true;
storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp
@@ -14,14 +14,12 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_FIXALLOC_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
 #include <pc.hpp>

-#define ljam() { jamLine(6000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(6000 + __LINE__); }
-
 //
 // Fixed Allocator
 // This module is used to allocate and free fixed size tuples from the
@@ -79,7 +77,7 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr,
 /* ---------------------------------------------------------------- */
     pagePtr.i = getEmptyPage(regFragPtr);
     if (pagePtr.i != RNIL) {
-      ljam();
+      jam();
 /* ---------------------------------------------------------------- */
 // We found empty pages on the fragment. Allocate an empty page and
 // convert it into a tuple header page and put it in thFreeFirst-list.
@@ -95,14 +93,14 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr,
       LocalDLList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst);
       free_pages.add(pagePtr);
     } else {
-      ljam();
+      jam();
 /* ---------------------------------------------------------------- */
 /* THERE ARE NO EMPTY PAGES. MEMORY CAN NOT BE ALLOCATED.            */
 /* ---------------------------------------------------------------- */
       return 0;
     }
   } else {
-    ljam();
+    jam();
 /* ---------------------------------------------------------------- */
 /* THIS SHOULD BE THE COMMON PATH THROUGH THE CODE, FREE             */
 /* COPY PAGE EXISTED.                                                */
@@ -194,7 +192,7 @@ void Dbtup::free_fix_rec(Fragrecord* regFragPtr,
   if (free == 1)
   {
-    ljam();
+    jam();
     PagePtr pagePtr = { (Page*)regPagePtr, key->m_page_no };
     LocalDLList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst);
     ndbrequire(regPagePtr->page_state == ZTH_MM_FULL);
storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -15,6 +15,7 @@
 #define DBTUP_C
+#define DBTUP_GEN_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
@@ -34,9 +35,6 @@
 #define DEBUG(x) { ndbout << "TUP::" << x << endl; }

-#define ljam() { jamLine(24000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(24000 + __LINE__); }
-
 void Dbtup::initData()
 {
   cnoOfAttrbufrec = ZNO_OF_ATTRBUFREC;
@@ -152,21 +150,21 @@ BLOCK_FUNCTIONS(Dbtup)
 void Dbtup::execCONTINUEB(Signal* signal)
 {
-  ljamEntry();
+  jamEntry();
   Uint32 actionType = signal->theData[0];
   Uint32 dataPtr = signal->theData[1];
   switch (actionType) {
   case ZINITIALISE_RECORDS:
-    ljam();
+    jam();
     initialiseRecordsLab(signal, dataPtr,
                          signal->theData[2], signal->theData[3]);
     break;
   case ZREL_FRAG:
-    ljam();
+    jam();
     releaseFragment(signal, dataPtr, signal->theData[2]);
     break;
   case ZREPORT_MEMORY_USAGE:{
-    ljam();
+    jam();
     static int c_currentMemUsed = 0;
     Uint32 cnt = signal->theData[1];
     Uint32 tmp = c_page_pool.getSize();
@@ -201,11 +199,11 @@ void Dbtup::execCONTINUEB(Signal* signal)
     return;
   }
   case ZBUILD_INDEX:
-    ljam();
+    jam();
     buildIndex(signal, dataPtr);
     break;
   case ZTUP_SCAN:
-    ljam();
+    jam();
     {
       ScanOpPtr scanPtr;
       c_scanOpPool.getPtr(scanPtr, dataPtr);
@@ -214,7 +212,7 @@ void Dbtup::execCONTINUEB(Signal* signal)
     return;
   case ZFREE_EXTENT:
   {
-    ljam();
+    jam();
     TablerecPtr tabPtr;
     tabPtr.i = dataPtr;
@@ -227,7 +225,7 @@ void Dbtup::execCONTINUEB(Signal* signal)
   }
   case ZUNMAP_PAGES:
   {
-    ljam();
+    jam();
     TablerecPtr tabPtr;
     tabPtr.i = dataPtr;
@@ -240,7 +238,7 @@ void Dbtup::execCONTINUEB(Signal* signal)
   }
   case ZFREE_VAR_PAGES:
   {
-    ljam();
+    jam();
     drop_fragment_free_var_pages(signal);
     return;
   }
@@ -257,12 +255,12 @@ void Dbtup::execCONTINUEB(Signal* signal)
 /* **************************************************************** */
 void Dbtup::execSTTOR(Signal* signal)
 {
-  ljamEntry();
+  jamEntry();
   Uint32 startPhase = signal->theData[1];
   Uint32 sigKey = signal->theData[6];
   switch (startPhase) {
   case ZSTARTPHASE1:
-    ljam();
+    jam();
     CLEAR_ERROR_INSERT_VALUE;
     ndbrequire((c_lqh = (Dblqh*)globalData.getBlock(DBLQH)) != 0);
     ndbrequire((c_tsman = (Tsman*)globalData.getBlock(TSMAN)) != 0);
@@ -270,7 +268,7 @@ void Dbtup::execSTTOR(Signal* signal)
     cownref = calcTupBlockRef(0);
     break;
   default:
-    ljam();
+    jam();
     break;
   }//switch
   signal->theData[0] = sigKey;
@@ -293,7 +291,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
   Uint32 senderData = req->senderData;
   ndbrequire(req->noOfParameters == 0);

-  ljamEntry();
+  jamEntry();

   const ndb_mgm_configuration_iterator * p =
     m_ctx.m_config.getOwnConfigIterator();
@@ -413,58 +411,58 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData,
 {
   switch (switchData) {
   case 0:
-    ljam();
+    jam();
     initializeHostBuffer();
     break;
   case 1:
-    ljam();
+    jam();
     initializeOperationrec();
     break;
   case 2:
-    ljam();
+    jam();
     initializePage();
     break;
   case 3:
-    ljam();
+    jam();
     break;
   case 4:
-    ljam();
+    jam();
     initializeTablerec();
     break;
   case 5:
-    ljam();
+    jam();
     break;
   case 6:
-    ljam();
+    jam();
     initializeFragrecord();
     break;
   case 7:
-    ljam();
+    jam();
     initializeFragoperrec();
     break;
   case 8:
-    ljam();
+    jam();
     initializePageRange();
     break;
   case 9:
-    ljam();
+    jam();
     initializeTabDescr();
     break;
   case 10:
-    ljam();
+    jam();
     break;
   case 11:
-    ljam();
+    jam();
     break;
   case 12:
-    ljam();
+    jam();
     initializeAttrbufrec();
     break;
   case 13:
-    ljam();
+    jam();
     break;
   case 14:
-    ljam();
+    jam();
     {
       ReadConfigConf* conf = (ReadConfigConf*)signal->getDataPtrSend();
@@ -488,28 +486,28 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData,
 void Dbtup::execNDB_STTOR(Signal* signal)
 {
-  ljamEntry();
+  jamEntry();
   cndbcntrRef = signal->theData[0];
   Uint32 ownNodeId = signal->theData[1];
   Uint32 startPhase = signal->theData[2];
   switch (startPhase) {
   case ZSTARTPHASE1:
-    ljam();
+    jam();
     cownNodeId = ownNodeId;
     cownref = calcTupBlockRef(ownNodeId);
     break;
   case ZSTARTPHASE2:
-    ljam();
+    jam();
     break;
   case ZSTARTPHASE3:
-    ljam();
+    jam();
     startphase3Lab(signal, ~0, ~0);
     break;
   case ZSTARTPHASE4:
-    ljam();
+    jam();
     break;
   case ZSTARTPHASE6:
-    ljam();
+    jam();
 /*****************************************/
 /*       NOW SET THE DISK WRITE SPEED TO */
 /*       PAGES PER TICK AFTER SYSTEM     */
@@ -520,7 +518,7 @@ void Dbtup::execNDB_STTOR(Signal* signal)
     sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1);
     break;
   default:
-    ljam();
+    jam();
     break;
   }//switch
   signal->theData[0] = cownref;
@@ -597,7 +595,7 @@ void Dbtup::initializeTablerec()
 {
   TablerecPtr regTabPtr;
   for (regTabPtr.i = 0; regTabPtr.i < cnoOfTablerec; regTabPtr.i++) {
-    ljam();
+    jam();
     refresh_watch_dog();
     ptrAss(regTabPtr, tablerec);
     initTab(regTabPtr.p);
@@ -668,12 +666,12 @@ void Dbtup::initializeTabDescr()
 void Dbtup::execTUPSEIZEREQ(Signal* signal)
 {
   OperationrecPtr regOperPtr;
-  ljamEntry();
+  jamEntry();
   Uint32 userPtr = signal->theData[0];
   BlockReference userRef = signal->theData[1];
   if (!c_operation_pool.seize(regOperPtr))
   {
-    ljam();
+    jam();
     signal->theData[0] = userPtr;
     signal->theData[1] = ZGET_OPREC_ERROR;
     sendSignal(userRef, GSN_TUPSEIZEREF, signal, 2, JBB);
@@ -707,7 +705,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
 void Dbtup::execTUPRELEASEREQ(Signal* signal)
 {
   OperationrecPtr regOperPtr;
-  ljamEntry();
+  jamEntry();
   regOperPtr.i = signal->theData[0];
   c_operation_pool.getPtr(regOperPtr);
   set_trans_state(regOperPtr.p, TRANS_DISCONNECTED);
storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
@@ -14,6 +14,7 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_INDEX_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
@@ -23,9 +24,6 @@
 #include <AttributeHeader.hpp>
 #include <signaldata/TuxMaint.hpp>

-#define ljam() { jamLine(28000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(28000 + __LINE__); }
-
 // methods used by ordered index

 void
@@ -34,7 +32,7 @@ Dbtup::tuxGetTupAddr(Uint32 fragPtrI,
                      Uint32 pageIndex,
                      Uint32& tupAddr)
 {
-  ljamEntry();
+  jamEntry();
   PagePtr pagePtr;
   c_page_pool.getPtr(pagePtr, pageId);
   Uint32 fragPageId = pagePtr.p->frag_page_id;
@@ -48,7 +46,7 @@ Dbtup::tuxAllocNode(Signal* signal,
                     Uint32& pageOffset,
                     Uint32*& node)
 {
-  ljamEntry();
+  jamEntry();
   FragrecordPtr fragPtr;
   fragPtr.i = fragPtrI;
   ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -61,7 +59,7 @@ Dbtup::tuxAllocNode(Signal* signal,
   Uint32* ptr, frag_page_id;
   if ((ptr = alloc_fix_rec(fragPtr.p, tablePtr.p, &key, &frag_page_id)) == 0)
   {
-    ljam();
+    jam();
     terrorCode = ZMEM_NOMEM_ERROR; // caller sets error
     return terrorCode;
   }
@@ -82,7 +80,7 @@ Dbtup::tuxFreeNode(Signal* signal,
                    Uint32 pageOffset,
                    Uint32* node)
 {
-  ljamEntry();
+  jamEntry();
   FragrecordPtr fragPtr;
   fragPtr.i= fragPtrI;
   ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -105,7 +103,7 @@ Dbtup::tuxGetNode(Uint32 fragPtrI,
                   Uint32 pageOffset,
                   Uint32*& node)
 {
-  ljamEntry();
+  jamEntry();
   FragrecordPtr fragPtr;
   fragPtr.i = fragPtrI;
   ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -130,7 +128,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI,
                     Uint32 numAttrs,
                     Uint32* dataOut)
 {
-  ljamEntry();
+  jamEntry();
   // use own variables instead of globals
   FragrecordPtr fragPtr;
   fragPtr.i = fragPtrI;
@@ -150,21 +148,21 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI,
   Tuple_header* tuple_ptr = req_struct.m_tuple_ptr;
   if (tuple_ptr->get_tuple_version() != tupVersion)
   {
-    ljam();
+    jam();
     OperationrecPtr opPtr;
     opPtr.i = tuple_ptr->m_operation_ptr_i;
     Uint32 loopGuard = 0;
     while (opPtr.i != RNIL) {
       c_operation_pool.getPtr(opPtr);
       if (opPtr.p->tupVersion == tupVersion) {
-        ljam();
+        jam();
         if (!opPtr.p->m_copy_tuple_location.isNull()) {
           req_struct.m_tuple_ptr = (Tuple_header*)
             c_undo_buffer.get_ptr(&opPtr.p->m_copy_tuple_location);
         }
         break;
       }
-      ljam();
+      jam();
       opPtr.i = opPtr.p->prevActiveOp;
       ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
     }
@@ -202,7 +200,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI,
 int
 Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex,
                  Uint32* dataOut, bool xfrmFlag)
 {
-  ljamEntry();
+  jamEntry();
   // use own variables instead of globals
   FragrecordPtr fragPtr;
   fragPtr.i = fragPtrI;
@@ -305,7 +303,7 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataO
 int
 Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId,
                  Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
 {
-  ljamEntry();
+  jamEntry();
   // get table
   TablerecPtr tablePtr;
   tablePtr.i = tableId;
@@ -329,7 +327,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI,
                   Uint32 transId2,
                   Uint32 savePointId)
 {
-  ljamEntry();
+  jamEntry();
   FragrecordPtr fragPtr;
   fragPtr.i = fragPtrI;
   ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -358,9 +356,9 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI,
      * for this transaction and savepoint id.  If its tuple version
      * equals the requested then we have a visible tuple otherwise not.
      */
-    ljam();
+    jam();
     if (req_struct.m_tuple_ptr->get_tuple_version() == tupVersion) {
-      ljam();
+      jam();
       return true;
     }
   }
@@ -378,7 +376,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI,
 void
 Dbtup::execBUILDINDXREQ(Signal* signal)
 {
-  ljamEntry();
+  jamEntry();
 #ifdef TIME_MEASUREMENT
   time_events = 0;
   tot_time_passed = 0;
@@ -387,7 +385,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
   // get new operation
   BuildIndexPtr buildPtr;
   if (!c_buildIndexList.seize(buildPtr)) {
-    ljam();
+    jam();
     BuildIndexRec buildRec;
     memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request));
     buildRec.m_errorCode = BuildIndxRef::Busy;
@@ -402,7 +400,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
   do {
     const BuildIndxReq* buildReq = (const BuildIndxReq*)buildPtr.p->m_request;
     if (buildReq->getTableId() >= cnoOfTablerec) {
-      ljam();
+      jam();
      buildPtr.p->m_errorCode = BuildIndxRef::InvalidPrimaryTable;
      break;
     }
@@ -410,7 +408,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
     tablePtr.i = buildReq->getTableId();
     ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
     if (tablePtr.p->tableStatus != DEFINED) {
-      ljam();
+      jam();
       buildPtr.p->m_errorCode = BuildIndxRef::InvalidPrimaryTable;
       break;
     }
@@ -418,7 +416,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
     buildPtr.p->m_build_vs =
       tablePtr.p->m_attributes[MM].m_no_of_varsize > 0;
     if (DictTabInfo::isOrderedIndex(buildReq->getIndexType())) {
-      ljam();
+      jam();
       const DLList<TupTriggerData>& triggerList =
         tablePtr.p->tuxCustomTriggers;
@@ -426,13 +424,13 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
       triggerList.first(triggerPtr);
       while (triggerPtr.i != RNIL) {
         if (triggerPtr.p->indexId == buildReq->getIndexId()) {
-          ljam();
+          jam();
           break;
         }
         triggerList.next(triggerPtr);
       }
       if (triggerPtr.i == RNIL) {
-        ljam();
+        jam();
         // trigger was not created
         buildPtr.p->m_errorCode = BuildIndxRef::InternalError;
         break;
@@ -440,12 +438,12 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
       buildPtr.p->m_indexId = buildReq->getIndexId();
       buildPtr.p->m_buildRef = DBTUX;
     } else if (buildReq->getIndexId() == RNIL) {
-      ljam();
+      jam();
       // REBUILD of acc
       buildPtr.p->m_indexId = RNIL;
       buildPtr.p->m_buildRef = DBACC;
     } else {
-      ljam();
+      jam();
       buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType;
       break;
     }
@@ -490,7 +488,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
   // get fragment
   FragrecordPtr fragPtr;
   if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) {
-    ljam();
+    jam();
     // build ready
     buildIndexReply(signal, buildPtr.p);
     c_buildIndexList.release(buildPtr);
@@ -499,7 +497,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
   ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE);
   fragPtr.i = tablePtr.p->fragrec[buildPtr.p->m_fragNo];
   if (fragPtr.i == RNIL) {
-    ljam();
+    jam();
     buildPtr.p->m_fragNo++;
     buildPtr.p->m_pageId = 0;
     buildPtr.p->m_tupleNo = firstTupleNo;
@@ -509,7 +507,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
   // get page
   PagePtr pagePtr;
   if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) {
-    ljam();
+    jam();
     buildPtr.p->m_fragNo++;
     buildPtr.p->m_pageId = 0;
     buildPtr.p->m_tupleNo = firstTupleNo;
@@ -520,7 +518,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
   Uint32 pageState = pagePtr.p->page_state;
   // skip empty page
   if (pageState == ZEMPTY_MM) {
-    ljam();
+    jam();
     buildPtr.p->m_pageId++;
     buildPtr.p->m_tupleNo = firstTupleNo;
     break;
@@ -530,7 +528,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
   const Tuple_header* tuple_ptr = 0;
   pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
   if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
-    ljam();
+    jam();
     buildPtr.p->m_pageId++;
     buildPtr.p->m_tupleNo = firstTupleNo;
     break;
@@ -538,7 +536,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
   tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
   // skip over free tuple
   if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
-    ljam();
+    jam();
     buildPtr.p->m_tupleNo++;
     break;
   }
@@ -581,7 +579,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
       tuple as a copy tuple. The original tuple is stable and is thus
       preferrable to store in TUX.
     */
-    ljam();
+    jam();
    /**
     * Since copy tuples now can't be found on real pages.
@@ -610,11 +608,11 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
     }
     while (req->errorCode == 0 && pageOperPtr.i != RNIL);
   }
-  ljamEntry();
+  jamEntry();
   if (req->errorCode != 0) {
     switch (req->errorCode) {
     case TuxMaintReq::NoMemError:
-      ljam();
+      jam();
       buildPtr.p->m_errorCode = BuildIndxRef::AllocationFailure;
       break;
     default:
@@ -666,7 +664,7 @@ Dbtup::buildIndexReply(Signal* signal, const BuildIndexRec* buildPtrP)
   rep->setIndexId(buildReq->getIndexId());
   // conf
   if (buildPtrP->m_errorCode == BuildIndxRef::NoError) {
-    ljam();
+    jam();
     sendSignal(rep->getUserRef(), GSN_BUILDINDXCONF,
                signal, BuildIndxConf::SignalLength, JBB);
     return;
storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
(diff collapsed in the page view; not shown here)
storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
@@ -14,14 +14,12 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_PAG_MAN_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
 #include <pc.hpp>

-#define ljam() { jamLine(16000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(16000 + __LINE__); }
-
 /* ---------------------------------------------------------------- */
 // 4) Page Memory Manager (buddy algorithm)
 //
@@ -121,7 +119,7 @@ void Dbtup::initializePage()
   }//for
   PagePtr pagePtr;
   for (pagePtr.i = 0; pagePtr.i < c_page_pool.getSize(); pagePtr.i++) {
-    ljam();
+    jam();
     refresh_watch_dog();
     c_page_pool.getPtr(pagePtr);
     pagePtr.p->physical_page_id = RNIL;
@@ -153,16 +151,16 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
                            Uint32& allocPageRef)
 {
   if (noOfPagesToAllocate == 0){
-    ljam();
+    jam();
     noOfPagesAllocated = 0;
     return;
   }//if

   Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1);
   for (Uint32 i = firstListToCheck; i < 16; i++) {
-    ljam();
+    jam();
     if (cfreepageList[i] != RNIL) {
-      ljam();
+      jam();
 /* ---------------------------------------------------------------- */
 /* PROPER AMOUNT OF PAGES WERE FOUND. NOW SPLIT THE FOUND            */
 /* AREA AND RETURN THE PART NOT NEEDED.                              */
@@ -182,11 +180,11 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
 /* ---------------------------------------------------------------- */
   if (firstListToCheck) {
-    ljam();
+    jam();
     for (Uint32 j = firstListToCheck - 1; (Uint32)~j; j--) {
-      ljam();
+      jam();
       if (cfreepageList[j] != RNIL) {
-        ljam();
+        jam();
 /* ---------------------------------------------------------------- */
 /* SOME AREA WAS FOUND, ALLOCATE ALL OF IT.                          */
 /* ---------------------------------------------------------------- */
@@ -212,9 +210,9 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
 void Dbtup::returnCommonArea(Uint32 retPageRef, Uint32 retNo)
 {
   do {
-    ljam();
+    jam();
     if (retNo == 0) {
-      ljam();
+      jam();
       return;
     }//if
     Uint32 list = nextHigherTwoLog(retNo) - 1;
@@ -231,28 +229,28 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef,
   PagePtr pageFirstPtr, pageLastPtr;
   Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
   while (allocPageRef > 0) {
-    ljam();
+    jam();
     pageLastPtr.i = allocPageRef - 1;
     c_page_pool.getPtr(pageLastPtr);
     if (pageLastPtr.p->page_state != ZFREE_COMMON) {
-      ljam();
+      jam();
       return;
     } else {
-      ljam();
+      jam();
       pageFirstPtr.i = pageLastPtr.p->first_cluster_page;
       ndbrequire(pageFirstPtr.i != RNIL);
       Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i);
       removeCommonArea(pageFirstPtr.i, list);
       Uint32 listSize = 1 << list;
       if (listSize > remainAllocate) {
-        ljam();
+        jam();
         Uint32 retNo = listSize - remainAllocate;
         returnCommonArea(pageFirstPtr.i, retNo);
         allocPageRef = pageFirstPtr.i + retNo;
         noPagesAllocated = noOfPagesToAllocate;
         return;
       } else {
-        ljam();
+        jam();
         allocPageRef = pageFirstPtr.i;
         noPagesAllocated += listSize;
         remainAllocate -= listSize;
@@ -268,32 +266,32 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef,
   PagePtr pageFirstPtr, pageLastPtr;
   Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
   if (remainAllocate == 0) {
-    ljam();
+    jam();
     return;
   }//if
   while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize()) {
-    ljam();
+    jam();
     pageFirstPtr.i = allocPageRef + noPagesAllocated;
     c_page_pool.getPtr(pageFirstPtr);
     if (pageFirstPtr.p->page_state != ZFREE_COMMON) {
-      ljam();
+      jam();
       return;
     } else {
-      ljam();
+      jam();
       pageLastPtr.i = pageFirstPtr.p->last_cluster_page;
       ndbrequire(pageLastPtr.i != RNIL);
       Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i);
       removeCommonArea(pageFirstPtr.i, list);
       Uint32 listSize = 1 << list;
       if (listSize > remainAllocate) {
-        ljam();
+        jam();
         Uint32 retPageRef = pageFirstPtr.i + remainAllocate;
         Uint32 retNo = listSize - remainAllocate;
         returnCommonArea(retPageRef, retNo);
         noPagesAllocated += remainAllocate;
         return;
       } else {
-        ljam();
+        jam();
         noPagesAllocated += listSize;
         remainAllocate -= listSize;
       }//if
@@ -328,30 +326,30 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list)
   c_page_pool.getPtr(remPagePtr, remPageRef);
   ndbrequire(list < 16);
   if (cfreepageList[list] == remPagePtr.i) {
-    ljam();
+    jam();
     cfreepageList[list] = remPagePtr.p->next_cluster_page;
     pageNextPtr.i = cfreepageList[list];
     if (pageNextPtr.i != RNIL) {
-      ljam();
+      jam();
       c_page_pool.getPtr(pageNextPtr);
       pageNextPtr.p->prev_cluster_page = RNIL;
     }//if
   } else {
     pageSearchPtr.i = cfreepageList[list];
     while (true) {
-      ljam();
+      jam();
       c_page_pool.getPtr(pageSearchPtr);
       pagePrevPtr = pageSearchPtr;
       pageSearchPtr.i = pageSearchPtr.p->next_cluster_page;
       if (pageSearchPtr.i == remPagePtr.i) {
-        ljam();
+        jam();
         break;
       }//if
     }//while
     pageNextPtr.i = remPagePtr.p->next_cluster_page;
     pagePrevPtr.p->next_cluster_page = pageNextPtr.i;
     if (pageNextPtr.i != RNIL) {
-      ljam();
+      jam();
       c_page_pool.getPtr(pageNextPtr);
       pageNextPtr.p->prev_cluster_page = pagePrevPtr.i;
     }//if
storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -15,14 +15,12 @@
 #define DBTUP_C
+#define DBTUP_PAGE_MAP_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
 #include <pc.hpp>

-#define ljam() { jamLine(14000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(14000 + __LINE__); }
-
 //
 // PageMap is a service used by Dbtup to map logical page id's to physical
 // page id's. The mapping is needs the fragment and the logical page id to
@@ -92,11 +90,11 @@ Uint32 Dbtup::getEmptyPage(Fragrecord* regFragPtr)
 {
   Uint32 pageId = regFragPtr->emptyPrimPage.firstItem;
   if (pageId == RNIL) {
-    ljam();
+    jam();
     allocMoreFragPages(regFragPtr);
     pageId = regFragPtr->emptyPrimPage.firstItem;
     if (pageId == RNIL) {
-      ljam();
+      jam();
       return RNIL;
     }//if
   }//if
@@ -122,11 +120,11 @@ Uint32 Dbtup::getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId)
     loopLimit = grpPageRangePtr.p->currentIndexPos;
     ndbrequire(loopLimit <= 3);
     for (Uint32 i = 0; i <= loopLimit; i++) {
-      ljam();
+      jam();
       if (grpPageRangePtr.p->startRange[i] <= logicalPageId) {
         if (grpPageRangePtr.p->endRange[i] >= logicalPageId) {
           if (grpPageRangePtr.p->type[i] == ZLEAF) {
-            ljam();
+            jam();
             Uint32 realPageId = (logicalPageId - grpPageRangePtr.p->startRange[i]) +
                                  grpPageRangePtr.p->basePageId[i];
             return realPageId;
@@ -167,12 +165,12 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
 {
   PageRangePtr currPageRangePtr;
   if (cfirstfreerange == RNIL) {
-    ljam();
+    jam();
     return false;
   }//if
   currPageRangePtr.i = regFragPtr->currentPageRange;
   if (currPageRangePtr.i == RNIL) {
-    ljam();
+    jam();
 /* ---------------------------------------------------------------- */
 /*       THE FIRST PAGE RANGE IS HANDLED WITH SPECIAL CODE           */
 /* ---------------------------------------------------------------- */
@@ -181,10 +179,10 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
     currPageRangePtr.p->currentIndexPos = 0;
     currPageRangePtr.p->parentPtr = RNIL;
   } else {
-    ljam();
+    jam();
     ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange);
     if (currPageRangePtr.p->currentIndexPos < 3) {
-      ljam();
+      jam();
 /* ---------------------------------------------------------------- */
 /*       THE SIMPLE CASE WHEN IT IS ONLY NECESSARY TO FILL IN THE    */
 /*       NEXT EMPTY POSITION IN THE PAGE RANGE RECORD IS TREATED     */
@@ -192,7 +190,7 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
 /* ---------------------------------------------------------------- */
       currPageRangePtr.p->currentIndexPos++;
     } else {
-      ljam();
+      jam();
       ndbrequire(currPageRangePtr.p->currentIndexPos == 3);
       currPageRangePtr.i = leafPageRangeFull(regFragPtr, currPageRangePtr);
       if (currPageRangePtr.i == RNIL) {
@@ -223,15 +221,15 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
   PageRangePtr loopPageRangePtr;
   loopPageRangePtr = currPageRangePtr;
   while (true) {
-    ljam();
+    jam();
     loopPageRangePtr.i = loopPageRangePtr.p->parentPtr;
     if (loopPageRangePtr.i != RNIL) {
-      ljam();
+      jam();
       ptrCheckGuard(loopPageRangePtr, cnoOfPageRangeRec, pageRange);
       ndbrequire(loopPageRangePtr.p->currentIndexPos < 4);
       loopPageRangePtr.p->endRange[loopPageRangePtr.p->currentIndexPos] += noPages;
     } else {
-      ljam();
+      jam();
       break;
     }//if
   }//while
@@ -243,26 +241,26 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
 void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
 {
   if (regFragPtr->rootPageRange == RNIL) {
-    ljam();
+    jam();
     return;
   }//if
   PageRangePtr regPRPtr;
   regPRPtr.i = regFragPtr->rootPageRange;
   ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
   while (true) {
-    ljam();
+    jam();
     const Uint32 indexPos = regPRPtr.p->currentIndexPos;
     ndbrequire(indexPos < 4);

     const Uint32 basePageId = regPRPtr.p->basePageId[indexPos];
     regPRPtr.p->basePageId[indexPos] = RNIL;
     if (basePageId == RNIL) {
-      ljam();
+      jam();
       /**
        * Finished with indexPos continue with next
        */
       if (indexPos > 0) {
-        ljam();
+        jam();
         regPRPtr.p->currentIndexPos--;
         continue;
       }//if
@@ -274,13 +272,13 @@ void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
       releasePagerange(regPRPtr);
       if (parentPtr != RNIL) {
-        ljam();
+        jam();
         regPRPtr.i = parentPtr;
         ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
         continue;
       }//if

-      ljam();
+      jam();
       ndbrequire(regPRPtr.i == regFragPtr->rootPageRange);
       initFragRange(regFragPtr);
       for (Uint32 i = 0; i < MAX_FREE_LIST; i++)
@@ -364,7 +362,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
     Uint32 retPageRef = RNIL;
     allocConsPages(noPagesToAllocate, noOfPagesAllocated, retPageRef);
     if (noOfPagesAllocated == 0) {
-      ljam();
+      jam();
       return tafpPagesAllocated;
     }//if
 /* ---------------------------------------------------------------- */
@@ -373,7 +371,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
 /* ---------------------------------------------------------------- */
     Uint32 startRange = regFragPtr->nextStartRange;
     if (!insertPageRangeTab(regFragPtr, retPageRef, noOfPagesAllocated)) {
-      ljam();
+      jam();
       returnCommonArea(retPageRef, noOfPagesAllocated);
       return tafpPagesAllocated;
     }//if
@@ -388,7 +386,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
 /* ---------------------------------------------------------------- */
     Uint32 prev = RNIL;
     for (loopPagePtr.i = retPageRef; loopPagePtr.i < loopLimit; loopPagePtr.i++) {
-      ljam();
+      jam();
       c_page_pool.getPtr(loopPagePtr);
       loopPagePtr.p->page_state = ZEMPTY_MM;
       loopPagePtr.p->frag_page_id = startRange +
@@ -416,10 +414,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
 /*       WAS ENOUGH PAGES ALLOCATED OR ARE MORE NEEDED.              */
 /* ---------------------------------------------------------------- */
     if (tafpPagesAllocated < tafpNoAllocRequested) {
-      ljam();
+      jam();
     } else {
       ndbrequire(tafpPagesAllocated == tafpNoAllocRequested);
-      ljam();
+      jam();
       return tafpNoAllocRequested;
     }//if
   }//while
@@ -451,15 +449,15 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
   parentPageRangePtr = currPageRangePtr;
   Uint32 tiprNoLevels = 1;
   while (true) {
-    ljam();
+    jam();
     parentPageRangePtr.i = parentPageRangePtr.p->parentPtr;
     if (parentPageRangePtr.i == RNIL) {
-      ljam();
+      jam();
 /* ---------------------------------------------------------------- */
 /*       WE HAVE REACHED THE ROOT. A NEW ROOT MUST BE ALLOCATED.     */
 /* ---------------------------------------------------------------- */
       if (c_noOfFreePageRanges < tiprNoLevels) {
-        ljam();
+        jam();
         return RNIL;
       }//if
       PageRangePtr oldRootPRPtr;
@@ -482,10 +480,10 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
       foundPageRangePtr = newRootPRPtr;
       break;
     } else {
-      ljam();
+      jam();
       ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange);
       if (parentPageRangePtr.p->currentIndexPos < 3) {
-        ljam();
+        jam();
 /* ---------------------------------------------------------------- */
 /*       WE HAVE FOUND AN EMPTY ENTRY IN A PAGE RANGE RECORD.        */
 /*       ALLOCATE A NEW PAGE RANGE RECORD, FILL IN THE START RANGE,  */
@@ -498,7 +496,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
         foundPageRangePtr = parentPageRangePtr;
         break;
       } else {
-        ljam();
+        jam();
         ndbrequire(parentPageRangePtr.p->currentIndexPos == 3);
 /* ---------------------------------------------------------------- */
 /*       THE PAGE RANGE RECORD WAS FULL. FIND THE PARENT RECORD      */
@@ -516,7 +514,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
   PageRangePtr prevPageRangePtr;
   prevPageRangePtr = foundPageRangePtr;
   if (c_noOfFreePageRanges < tiprNoLevels) {
-    ljam();
+    jam();
     return RNIL;
   }//if
 /* ---------------------------------------------------------------- */
@@ -527,7 +525,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
 /*       ARE ALSO PROPERLY UPDATED ON THE PATH TO THE LEAF LEVEL.    */
 /* ---------------------------------------------------------------- */
   while (true) {
-    ljam();
+    jam();
     seizePagerange(newPageRangePtr);
     tiprNoLevels--;
     ndbrequire(prevPageRangePtr.p->currentIndexPos < 4);
@@ -535,13 +533,13 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
     newPageRangePtr.p->parentPtr = prevPageRangePtr.i;
     newPageRangePtr.p->currentIndexPos = 0;
     if (tiprNoLevels > 0) {
-      ljam();
+      jam();
       newPageRangePtr.p->startRange[0] = regFragPtr->nextStartRange;
       newPageRangePtr.p->endRange[0] = regFragPtr->nextStartRange - 1;
       newPageRangePtr.p->type[0] = ZNON_LEAF;
       prevPageRangePtr = newPageRangePtr;
     } else {
-      ljam();
+      jam();
       break;
     }//if
   }//while
@@ -576,16 +574,16 @@ void Dbtup::errorHandler(Uint32 errorCode)
 {
   switch (errorCode) {
   case 0:
-    ljam();
+    jam();
     break;
   case 1:
-    ljam();
+    jam();
     break;
   case 2:
-    ljam();
+    jam();
     break;
   default:
-    ljam();
+    jam();
   }
   ndbrequire(false);
 }//Dbtup::errorHandler()
storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
(diff collapsed in the page view; not shown here)
storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
@@ -14,6 +14,7 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

 #define DBTUP_C
+#define DBTUP_SCAN_CPP
 #include "Dbtup.hpp"
 #include <signaldata/AccScan.hpp>
 #include <signaldata/NextScan.hpp>
storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp
@@ -15,14 +15,12 @@
#define DBTUP_C
#define DBTUP_STORE_PROC_DEF_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#define ljam() { jamLine(18000 + __LINE__); }
#define ljamEntry() { jamEntryLine(18000 + __LINE__); }
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* ------------ADD/DROP STORED PROCEDURE MODULE ------------------- */
...
...
@@ -32,7 +30,7 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal)
{
OperationrecPtr
regOperPtr
;
TablerecPtr
regTabPtr
;
l
jamEntry
();
jamEntry
();
regOperPtr
.
i
=
signal
->
theData
[
0
];
c_operation_pool
.
getPtr
(
regOperPtr
);
regTabPtr
.
i
=
signal
->
theData
[
1
];
...
...
@@ -46,17 +44,17 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal)
ndbrequire
(
regTabPtr
.
p
->
tableStatus
==
DEFINED
);
switch
(
requestInfo
)
{
case
ZSCAN_PROCEDURE
:
l
jam
();
jam
();
scanProcedure
(
signal
,
regOperPtr
.
p
,
signal
->
theData
[
4
]);
break
;
case
ZCOPY_PROCEDURE
:
l
jam
();
jam
();
copyProcedure
(
signal
,
regTabPtr
,
regOperPtr
.
p
);
break
;
case
ZSTORED_PROCEDURE_DELETE
:
l
jam
();
jam
();
deleteScanProcedure
(
signal
,
regOperPtr
.
p
);
break
;
default:
...
...
@@ -124,14 +122,14 @@ void Dbtup::copyProcedure(Signal* signal,
AttributeHeader
::
init
(
&
signal
->
theData
[
length
+
1
],
Ti
,
0
);
length
++
;
if
(
length
==
24
)
{
l
jam
();
jam
();
ndbrequire
(
storedProcedureAttrInfo
(
signal
,
regOperPtr
,
signal
->
theData
+
1
,
length
,
true
));
length
=
0
;
}
//if
}
//for
if
(
length
!=
0
)
{
l
jam
();
jam
();
ndbrequire
(
storedProcedureAttrInfo
(
signal
,
regOperPtr
,
signal
->
theData
+
1
,
length
,
true
));
}
//if
...
...
@@ -155,7 +153,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal,
ndbrequire
(
regOperPtr
->
currentAttrinbufLen
<=
regOperPtr
->
attrinbufLen
);
if
((
RnoFree
>
MIN_ATTRBUF
)
||
(
copyProcedure
))
{
l
jam
();
jam
();
regAttrPtr
.
i
=
cfirstfreeAttrbufrec
;
ptrCheckGuard
(
regAttrPtr
,
cnoOfAttrbufrec
,
attrbufrec
);
regAttrPtr
.
p
->
attrbuf
[
ZBUF_DATA_LEN
]
=
0
;
...
...
@@ -163,18 +161,18 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal,
cnoFreeAttrbufrec
=
RnoFree
-
1
;
regAttrPtr
.
p
->
attrbuf
[
ZBUF_NEXT
]
=
RNIL
;
}
else
{
l
jam
();
jam
();
storedSeizeAttrinbufrecErrorLab
(
signal
,
regOperPtr
);
return
false
;
}
//if
if
(
regOperPtr
->
firstAttrinbufrec
==
RNIL
)
{
l
jam
();
jam
();
regOperPtr
->
firstAttrinbufrec
=
regAttrPtr
.
i
;
}
//if
regAttrPtr
.
p
->
attrbuf
[
ZBUF_NEXT
]
=
RNIL
;
if
(
regOperPtr
->
lastAttrinbufrec
!=
RNIL
)
{
AttrbufrecPtr
tempAttrinbufptr
;
l
jam
();
jam
();
tempAttrinbufptr
.
i
=
regOperPtr
->
lastAttrinbufrec
;
ptrCheckGuard
(
tempAttrinbufptr
,
cnoOfAttrbufrec
,
attrbufrec
);
tempAttrinbufptr
.
p
->
attrbuf
[
ZBUF_NEXT
]
=
regAttrPtr
.
i
;
...
...
@@ -187,7 +185,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal,
                    length);
   if (regOperPtr->currentAttrinbufLen < regOperPtr->attrinbufLen) {
-    ljam();
+    jam();
     return true;
   }//if
   if (ERROR_INSERTED(4005) && !copyProcedure) {
...
...
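Aside: every substitution in this file follows one pattern. The local ljam()/ljamEntry() macros, which recorded jamLine(18000 + __LINE__) markers, are removed, and the call sites switch to the common jam()/jamEntry() macros. The snippet below is a minimal, self-contained sketch (not NDB code) of how such a line-marker trace can work; JamBuffer, FILE_JAM_OFFSET and this toy jam()/jamEntry() are illustrative stand-ins for the real jam machinery, not its API.

// Illustrative sketch only: a jam()-style macro pushes "execution passed
// this line" markers into a small ring buffer; a per-file offset keeps
// markers from different source files distinguishable in a crash dump.
#include <cstdio>

struct JamBuffer {                     // stand-in for the real jam buffer
  static const unsigned SIZE = 1024;
  unsigned marks[SIZE];
  unsigned cursor;
  JamBuffer() : cursor(0) { for (unsigned j = 0; j < SIZE; j++) marks[j] = 0; }
  void push(unsigned mark) { marks[cursor++ % SIZE] = mark; }
};

static JamBuffer theJamBuffer;

// Assumed per-file offset, mirroring the removed "18000 + __LINE__" macros.
#define FILE_JAM_OFFSET 18000
#define jam()      theJamBuffer.push(FILE_JAM_OFFSET + __LINE__)
#define jamEntry() theJamBuffer.push(FILE_JAM_OFFSET + __LINE__)

int main() {
  jamEntry();                          // mark entry into a signal handler
  jam();                               // mark a branch that was taken
  std::printf("last jam marker: %u\n",
              theJamBuffer.marks[(theJamBuffer.cursor - 1) % JamBuffer::SIZE]);
  return 0;
}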
storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
...
@@ -15,14 +15,12 @@
 #define DBTUP_C
 #define DBTUP_TAB_DES_MAN_CPP
 #include "Dbtup.hpp"
 #include <RefConvert.hpp>
 #include <ndb_limits.h>
 #include <pc.hpp>
-#define ljam() { jamLine(22000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(22000 + __LINE__); }
 /*
  * TABLE DESCRIPTOR MEMORY MANAGER
  *
...
@@ -65,30 +63,30 @@ Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
   allocSize = (((allocSize - 1) >> 4) + 1) << 4;
   Uint32 list = nextHigherTwoLog(allocSize - 1);	/* CALCULATE WHICH LIST IT BELONGS TO */
   for (Uint32 i = list; i < 16; i++) {
-    ljam();
+    jam();
     if (cfreeTdList[i] != RNIL) {
-      ljam();
+      jam();
       reference = cfreeTdList[i];
       removeTdArea(reference, i);	/* REMOVE THE AREA FROM THE FREELIST */
       Uint32 retNo = (1 << i) - allocSize;	/* CALCULATE THE DIFFERENCE */
       if (retNo >= ZTD_FREE_SIZE) {
-        ljam();
+        jam();
         // return unused words, of course without attempting left merge
         Uint32 retRef = reference + allocSize;
         freeTabDescr(retRef, retNo, false);
       } else {
-        ljam();
+        jam();
         allocSize = 1 << i;
       }//if
       break;
     }//if
   }//for
   if (reference == RNIL) {
-    ljam();
+    jam();
     terrorCode = ZMEM_NOTABDESCR_ERROR;
     return RNIL;
   } else {
-    ljam();
+    jam();
     setTabDescrWord((reference + allocSize) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL);
     setTabDescrWord(reference + ZTD_DATASIZE, allocSize);
...
@@ -105,7 +103,7 @@ void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal)
 {
   itdaMergeTabDescr(retRef, retNo, normal);	/* MERGE WITH POSSIBLE NEIGHBOURS */
   while (retNo >= ZTD_FREE_SIZE) {
-    ljam();
+    jam();
     Uint32 list = nextHigherTwoLog(retNo);
     list--;	/* RETURN TO NEXT LOWER LIST */
     Uint32 sizeOfChunk = 1 << list;
...
@@ -136,7 +134,7 @@ void Dbtup::insertTdArea(Uint32 tabDesRef, Uint32 list)
   setTabDescrWord(tabDesRef + ZTD_FL_HEADER, ZTD_TYPE_FREE);
   setTabDescrWord(tabDesRef + ZTD_FL_NEXT, cfreeTdList[list]);
   if (cfreeTdList[list] != RNIL) {
-    ljam();	/* PREVIOUSLY EMPTY SLOT */
+    jam();	/* PREVIOUSLY EMPTY SLOT */
     setTabDescrWord(cfreeTdList[list] + ZTD_FL_PREV, tabDesRef);
   }//if
   cfreeTdList[list] = tabDesRef;	/* RELINK THE LIST */
...
@@ -156,28 +154,28 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal)
 {
   // merge right
   while ((retRef + retNo) < cnoOfTabDescrRec) {
-    ljam();
+    jam();
     Uint32 tabDesRef = retRef + retNo;
     Uint32 headerWord = getTabDescrWord(tabDesRef + ZTD_FL_HEADER);
     if (headerWord == ZTD_TYPE_FREE) {
-      ljam();
+      jam();
       Uint32 sizeOfMergedPart = getTabDescrWord(tabDesRef + ZTD_FL_SIZE);
       retNo += sizeOfMergedPart;
       Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1);
       removeTdArea(tabDesRef, list);
     } else {
-      ljam();
+      jam();
       break;
     }
   }
   // merge left
   const bool mergeLeft = normal;
   while (mergeLeft && retRef > 0) {
-    ljam();
+    jam();
     Uint32 trailerWord = getTabDescrWord(retRef - ZTD_TR_TYPE);
     if (trailerWord == ZTD_TYPE_FREE) {
-      ljam();
+      jam();
       Uint32 sizeOfMergedPart = getTabDescrWord(retRef - ZTD_TR_SIZE);
       ndbrequire(retRef >= sizeOfMergedPart);
       retRef -= sizeOfMergedPart;
...
@@ -185,7 +183,7 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal)
       Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1);
       removeTdArea(retRef, list);
     } else {
-      ljam();
+      jam();
       break;
     }
   }
...
@@ -213,15 +211,15 @@ void Dbtup::removeTdArea(Uint32 tabDesRef, Uint32 list)
   setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL);
   if (tabDesRef == cfreeTdList[list]) {
-    ljam();
+    jam();
     cfreeTdList[list] = tabDescrNextPtr;	/* RELINK THE LIST */
   }//if
   if (tabDescrNextPtr != RNIL) {
-    ljam();
+    jam();
     setTabDescrWord(tabDescrNextPtr + ZTD_FL_PREV, tabDescrPrevPtr);
   }//if
   if (tabDescrPrevPtr != RNIL) {
-    ljam();
+    jam();
     setTabDescrWord(tabDescrPrevPtr + ZTD_FL_NEXT, tabDescrNextPtr);
   }//if
 }//Dbtup::removeTdArea()
...
...
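Aside: the allocTabDescr() hunk above walks free lists bucketed by powers of two. nextHigherTwoLog(allocSize - 1) picks the smallest bucket whose chunks hold at least allocSize words, and any unused tail of the chosen chunk goes back through freeTabDescr(). The sketch below reproduces only that bucket-index arithmetic, under the assumption that nextHigherTwoLog(x) returns the smallest k with (1 << k) > x, which is consistent with how the loop uses it; the helper here is a stand-in, not the NDB implementation.

// Illustrative sketch only: choosing a power-of-two free-list index so that
// a chunk of (1 << index) words is always large enough for allocSize.
#include <cstdio>

typedef unsigned int Uint32;

// Assumed behaviour: smallest k such that (1u << k) > x.
static Uint32 nextHigherTwoLog(Uint32 x) {
  Uint32 k = 0;
  while ((1u << k) <= x) k++;
  return k;
}

int main() {
  Uint32 allocSize = 37;
  allocSize = (((allocSize - 1) >> 4) + 1) << 4;   // round up to a multiple of 16: 37 -> 48
  Uint32 list = nextHigherTwoLog(allocSize - 1);   // smallest k with 2^k >= 48 -> 6
  Uint32 chunk = 1u << list;                       // the list holds 64-word chunks
  Uint32 leftover = chunk - allocSize;             // 16 unused words could be returned
  std::printf("allocSize=%u list=%u chunk=%u leftover=%u\n",
              allocSize, list, chunk, leftover);
  return 0;
}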
storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
This diff is collapsed.
storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp
...
@@ -14,12 +14,9 @@
 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
 #define DBTUP_C
 #define DBTUP_VAR_ALLOC_CPP
 #include "Dbtup.hpp"
-#define ljam() { jamLine(32000 + __LINE__); }
-#define ljamEntry() { jamEntryLine(32000 + __LINE__); }
 void
 Dbtup::init_list_sizes(void)
 {
   c_min_list_size[0] = 200;
...
@@ -109,9 +106,9 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr,
   PagePtr pagePtr;
   pagePtr.i = get_alloc_page(fragPtr, (alloc_size + 1));
   if (pagePtr.i == RNIL) {
-    ljam();
+    jam();
     if ((pagePtr.i = get_empty_var_page(fragPtr)) == RNIL) {
-      ljam();
+      jam();
       return 0;
     }
     c_page_pool.getPtr(pagePtr);
...
@@ -127,7 +124,7 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr,
     pagePtr.p->page_state = ZTH_MM_FREE;
   } else {
     c_page_pool.getPtr(pagePtr);
-    ljam();
+    jam();
   }
   Uint32 idx = ((Var_page*)pagePtr.p)->alloc_record(alloc_size, (Var_page*)ctemp_page, Var_page::CHAIN);
...
@@ -178,7 +175,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr,
   ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS);
   if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1) {
-    ljam();
+    jam();
     /*
       This code could be used when we release pages.
       remove_free_page(signal,fragPtr,page_header,page_header->list_index);
...
@@ -186,7 +183,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr,
     */
     update_free_page_list(fragPtr, pagePtr);
   } else {
-    ljam();
+    jam();
     update_free_page_list(fragPtr, pagePtr);
   }
   return;
...
@@ -260,16 +257,16 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size)
   start_index = calculate_free_list_impl(alloc_size);
   if (start_index == (MAX_FREE_LIST - 1)) {
-    ljam();
+    jam();
   } else {
-    ljam();
+    jam();
     ndbrequire(start_index < (MAX_FREE_LIST - 1));
     start_index++;
   }
   for (i = start_index; i < MAX_FREE_LIST; i++) {
-    ljam();
+    jam();
     if (!fragPtr->free_var_page_array[i].isEmpty()) {
-      ljam();
+      jam();
       return fragPtr->free_var_page_array[i].firstItem;
     }
   }
...
@@ -278,9 +275,9 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size)
   LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[i]);
   for (list.first(pagePtr); !pagePtr.isNull() && loop < 16; ) {
-    ljam();
+    jam();
     if (pagePtr.p->free_space >= alloc_size) {
-      ljam();
+      jam();
       return pagePtr.i;
     }
     loop++;
...
@@ -347,7 +344,7 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr,
       (free_space > c_max_list_size[list_index])) {
     Uint32 new_list_index = calculate_free_list_impl(free_space);
     if (list_index != MAX_FREE_LIST) {
-      ljam();
+      jam();
       /*
        * Only remove it from its list if it is in a list
        */
...
@@ -362,11 +359,11 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr,
         This can only happen for the free list with least guaranteed
         free space.
       */
-      ljam();
+      jam();
       ndbrequire(new_list_index == 0);
       pagePtr.p->list_index = MAX_FREE_LIST;
     } else {
-      ljam();
+      jam();
       LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[new_list_index]);
       list.add(pagePtr);
...
@@ -382,9 +379,9 @@ Uint32 Dbtup::calculate_free_list_impl(Uint32 free_space_size) const
 {
   Uint32 i;
   for (i = 0; i < MAX_FREE_LIST; i++) {
-    ljam();
+    jam();
     if (free_space_size <= c_max_list_size[i]) {
-      ljam();
+      jam();
      return i;
    }
  }
...
...
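Aside: the last hunk shows calculate_free_list_impl() returning the first size class whose c_max_list_size[] threshold covers the given free space, which is how free_var_page_array pages are grouped in the hunks above. A toy sketch of that lookup follows; the threshold values and MAX_FREE_LIST size below are made up for illustration (only c_min_list_size[0] = 200 is visible in this diff), and calculate_free_list_index is a stand-in name.

// Illustrative sketch only: picking a size-class index by scanning ascending
// per-class maximum free-space thresholds.
#include <cstdio>

typedef unsigned int Uint32;

static const Uint32 MAX_FREE_LIST = 4;
// Hypothetical thresholds; the real c_max_list_size values are set elsewhere.
static const Uint32 c_max_list_size[MAX_FREE_LIST] = { 199, 499, 999, 0xFFFFFFFF };

static Uint32 calculate_free_list_index(Uint32 free_space_size) {
  for (Uint32 i = 0; i < MAX_FREE_LIST; i++) {
    if (free_space_size <= c_max_list_size[i])
      return i;                   // first class that covers this much free space
  }
  return MAX_FREE_LIST - 1;       // unreachable with a saturating last threshold
}

int main() {
  std::printf("index for 150 free words: %u\n", calculate_free_list_index(150));  // 0
  std::printf("index for 750 free words: %u\n", calculate_free_list_index(750));  // 2
  return 0;
}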