Commit 9fb5e5d0 authored by lzhou/zhl@dev3-63.(none)

Merge lzhou@bk-internal.mysql.com:/home/bk/mysql-5.1-new-ndb-bj
into dev3-63.(none):/home/zhl/mysql/mysql-5.1/bug26307
parents afd6dbbc be4db499
......@@ -32,6 +32,82 @@
#include <../pgman.hpp>
#include <../tsman.hpp>
// jams
#undef jam
#undef jamEntry
#ifdef DBTUP_BUFFER_CPP
#define jam() jamLine(10000 + __LINE__)
#define jamEntry() jamEntryLine(10000 + __LINE__)
#endif
#ifdef DBTUP_ROUTINES_CPP
#define jam() jamLine(15000 + __LINE__)
#define jamEntry() jamEntryLine(15000 + __LINE__)
#endif
#ifdef DBTUP_COMMIT_CPP
#define jam() jamLine(20000 + __LINE__)
#define jamEntry() jamEntryLine(20000 + __LINE__)
#endif
#ifdef DBTUP_FIXALLOC_CPP
#define jam() jamLine(25000 + __LINE__)
#define jamEntry() jamEntryLine(25000 + __LINE__)
#endif
#ifdef DBTUP_TRIGGER_CPP
#define jam() jamLine(30000 + __LINE__)
#define jamEntry() jamEntryLine(30000 + __LINE__)
#endif
#ifdef DBTUP_ABORT_CPP
#define jam() jamLine(35000 + __LINE__)
#define jamEntry() jamEntryLine(35000 + __LINE__)
#endif
#ifdef DBTUP_PAGE_MAP_CPP
#define jam() jamLine(40000 + __LINE__)
#define jamEntry() jamEntryLine(40000 + __LINE__)
#endif
#ifdef DBTUP_PAG_MAN_CPP
#define jam() jamLine(45000 + __LINE__)
#define jamEntry() jamEntryLine(45000 + __LINE__)
#endif
#ifdef DBTUP_STORE_PROC_DEF_CPP
#define jam() jamLine(50000 + __LINE__)
#define jamEntry() jamEntryLine(50000 + __LINE__)
#endif
#ifdef DBTUP_META_CPP
#define jam() jamLine(55000 + __LINE__)
#define jamEntry() jamEntryLine(55000 + __LINE__)
#endif
#ifdef DBTUP_TAB_DES_MAN_CPP
#define jam() jamLine(60000 + __LINE__)
#define jamEntry() jamEntryLine(60000 + __LINE__)
#endif
#ifdef DBTUP_GEN_CPP
#define jam() jamLine(65000 + __LINE__)
#define jamEntry() jamEntryLine(65000 + __LINE__)
#endif
#ifdef DBTUP_INDEX_CPP
#define jam() jamLine(70000 + __LINE__)
#define jamEntry() jamEntryLine(70000 + __LINE__)
#endif
#ifdef DBTUP_DEBUG_CPP
#define jam() jamLine(75000 + __LINE__)
#define jamEntry() jamEntryLine(75000 + __LINE__)
#endif
#ifdef DBTUP_VAR_ALLOC_CPP
#define jam() jamLine(80000 + __LINE__)
#define jamEntry() jamEntryLine(80000 + __LINE__)
#endif
#ifdef DBTUP_SCAN_CPP
#define jam() jamLine(85000 + __LINE__)
#define jamEntry() jamEntryLine(85000 + __LINE__)
#endif
#ifdef DBTUP_DISK_ALLOC_CPP
#define jam() jamLine(90000 + __LINE__)
#define jamEntry() jamEntryLine(90000 + __LINE__)
#endif
#ifndef jam
#define jam() jamLine(__LINE__)
#define jamEntry() jamEntryLine(__LINE__)
#endif
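// Editor's illustration (not part of the patch): each DbtupXxx.cpp selects its
// own 5000-wide jam band by naming itself before including this header. For
// example DbtupCommit.cpp (as in the hunks further down) effectively compiles as:
//
//   #define DBTUP_C
//   #define DBTUP_COMMIT_CPP
//   #include "Dbtup.hpp"
//
//   void Dbtup::execTUP_COMMITREQ(Signal* signal)
//   {
//     jamEntry();   // records jamEntryLine(20000 + __LINE__)
//     ...
//   }
//
// A translation unit that defines none of the DBTUP_*_CPP symbols falls back to
// the plain jamLine(__LINE__) / jamEntryLine(__LINE__) definitions above.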
#ifdef VM_TRACE
inline const char* dbgmask(const Bitmask<MAXNROFATTRIBUTESINWORDS>& bm) {
static int i=0; static char buf[5][200];
......@@ -70,22 +146,23 @@ inline const Uint32* ALIGN_WORD(const void* ptr)
// only reports the line number in the file it currently is located in.
//
// DbtupExecQuery.cpp 0
// DbtupBuffer.cpp 2000
// DbtupRoutines.cpp 3000
// DbtupCommit.cpp 5000
// DbtupFixAlloc.cpp 6000
// DbtupTrigger.cpp 7000
// DbtupAbort.cpp 9000
// DbtupPageMap.cpp 14000
// DbtupPagMan.cpp 16000
// DbtupStoredProcDef.cpp 18000
// DbtupMeta.cpp 20000
// DbtupTabDesMan.cpp 22000
// DbtupGen.cpp 24000
// DbtupIndex.cpp 28000
// DbtupDebug.cpp 30000
// DbtupVarAlloc.cpp 32000
// DbtupScan.cpp 33000
// DbtupBuffer.cpp 10000
// DbtupRoutines.cpp 15000
// DbtupCommit.cpp 20000
// DbtupFixAlloc.cpp 25000
// DbtupTrigger.cpp 30000
// DbtupAbort.cpp 35000
// DbtupPageMap.cpp 40000
// DbtupPagMan.cpp 45000
// DbtupStoredProcDef.cpp 50000
// DbtupMeta.cpp 55000
// DbtupTabDesMan.cpp 60000
// DbtupGen.cpp 65000
// DbtupIndex.cpp 70000
// DbtupDebug.cpp 75000
// DbtupVarAlloc.cpp 80000
// DbtupScan.cpp 85000
// DbtupDiskAlloc.cpp 90000
//------------------------------------------------------------------
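// Editor's sketch (not part of the patch): because every file above gets its
// own disjoint 5000-wide band, a value recorded by jam()/jamEntry() maps back
// to its origin as  file = band[value / 5000],  line = value % 5000
// (e.g. 20042 -> DbtupCommit.cpp:42, 85007 -> DbtupScan.cpp:7), assuming no
// file grows past 5000 lines. A hypothetical decoder, for illustration only:
//
//   inline void printTupJamOrigin(Uint32 recorded)
//   {
//     static const char* const band[19] = {
//       "DbtupExecQuery.cpp",     // 0     (plain __LINE__, no offset)
//       "DbtupExecQuery.cpp",     // 5000  (only if that file exceeds 5000 lines)
//       "DbtupBuffer.cpp",        // 10000
//       "DbtupRoutines.cpp",      // 15000
//       "DbtupCommit.cpp",        // 20000
//       "DbtupFixAlloc.cpp",      // 25000
//       "DbtupTrigger.cpp",       // 30000
//       "DbtupAbort.cpp",         // 35000
//       "DbtupPageMap.cpp",       // 40000
//       "DbtupPagMan.cpp",        // 45000
//       "DbtupStoredProcDef.cpp", // 50000
//       "DbtupMeta.cpp",          // 55000
//       "DbtupTabDesMan.cpp",     // 60000
//       "DbtupGen.cpp",           // 65000
//       "DbtupIndex.cpp",         // 70000
//       "DbtupDebug.cpp",         // 75000
//       "DbtupVarAlloc.cpp",      // 80000
//       "DbtupScan.cpp",          // 85000
//       "DbtupDiskAlloc.cpp"      // 90000
//     };
//     ndbout << band[recorded / 5000] << ":" << (recorded % 5000) << endl;
//   }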
/*
......
......@@ -14,21 +14,19 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_ABORT_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#define ljam() { jamLine(9000 + __LINE__); }
#define ljamEntry() { jamEntryLine(9000 + __LINE__); }
void Dbtup::freeAllAttrBuffers(Operationrec* const regOperPtr)
{
if (regOperPtr->storedProcedureId == RNIL) {
ljam();
jam();
freeAttrinbufrec(regOperPtr->firstAttrinbufrec);
} else {
ljam();
jam();
StoredProcPtr storedPtr;
c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId);
ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);
......@@ -46,7 +44,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf)
Uint32 RnoFree = cnoFreeAttrbufrec;
localAttrBufPtr.i = anAttrBuf;
while (localAttrBufPtr.i != RNIL) {
ljam();
jam();
ptrCheckGuard(localAttrBufPtr, cnoOfAttrbufrec, attrbufrec);
Ttemp = localAttrBufPtr.p->attrbuf[ZBUF_NEXT];
localAttrBufPtr.p->attrbuf[ZBUF_NEXT] = cfirstfreeAttrbufrec;
......@@ -62,7 +60,7 @@ void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf)
*/
void Dbtup::execTUP_ABORTREQ(Signal* signal)
{
ljamEntry();
jamEntry();
do_tup_abortreq(signal, 0);
}
......@@ -80,7 +78,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
(trans_state == TRANS_ERROR_WAIT_TUPKEYREQ) ||
(trans_state == TRANS_IDLE));
if (regOperPtr.p->op_struct.op_type == ZREAD) {
ljam();
jam();
freeAllAttrBuffers(regOperPtr.p);
initOpConnection(regOperPtr.p);
return;
......@@ -94,7 +92,7 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED)
{
ljam();
jam();
if (!regTabPtr.p->tuxCustomTriggers.isEmpty() &&
(flags & ZSKIP_TUX_TRIGGERS) == 0)
executeTuxAbortTriggers(signal,
......@@ -105,12 +103,12 @@ void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags)
OperationrecPtr loopOpPtr;
loopOpPtr.i = regOperPtr.p->nextActiveOp;
while (loopOpPtr.i != RNIL) {
ljam();
jam();
c_operation_pool.getPtr(loopOpPtr);
if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED &&
!regTabPtr.p->tuxCustomTriggers.isEmpty() &&
(flags & ZSKIP_TUX_TRIGGERS) == 0) {
ljam();
jam();
executeTuxAbortTriggers(signal,
loopOpPtr.p,
regFragPtr.p,
......@@ -211,116 +209,116 @@ int Dbtup::TUPKEY_abort(Signal* signal, int error_type)
case 1:
//tmupdate_alloc_error:
terrorCode= ZMEM_NOMEM_ERROR;
ljam();
jam();
break;
case 15:
ljam();
jam();
terrorCode = ZREGISTER_INIT_ERROR;
break;
case 16:
ljam();
jam();
terrorCode = ZTRY_TO_UPDATE_ERROR;
break;
case 17:
ljam();
jam();
terrorCode = ZNO_ILLEGAL_NULL_ATTR;
break;
case 19:
ljam();
jam();
terrorCode = ZTRY_TO_UPDATE_ERROR;
break;
case 20:
ljam();
jam();
terrorCode = ZREGISTER_INIT_ERROR;
break;
case 22:
ljam();
jam();
terrorCode = ZTOTAL_LEN_ERROR;
break;
case 23:
ljam();
jam();
terrorCode = ZREGISTER_INIT_ERROR;
break;
case 24:
ljam();
jam();
terrorCode = ZREGISTER_INIT_ERROR;
break;
case 26:
ljam();
jam();
terrorCode = ZREGISTER_INIT_ERROR;
break;
case 27:
ljam();
jam();
terrorCode = ZREGISTER_INIT_ERROR;
break;
case 28:
ljam();
jam();
terrorCode = ZREGISTER_INIT_ERROR;
break;
case 29:
ljam();
jam();
break;
case 30:
ljam();
jam();
terrorCode = ZCALL_ERROR;
break;
case 31:
ljam();
jam();
terrorCode = ZSTACK_OVERFLOW_ERROR;
break;
case 32:
ljam();
jam();
terrorCode = ZSTACK_UNDERFLOW_ERROR;
break;
case 33:
ljam();
jam();
terrorCode = ZNO_INSTRUCTION_ERROR;
break;
case 34:
ljam();
jam();
terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR;
break;
case 35:
ljam();
jam();
terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR;
break;
case 38:
ljam();
jam();
terrorCode = ZTEMPORARY_RESOURCE_FAILURE;
break;
case 39:
if (get_trans_state(operPtr.p) == TRANS_TOO_MUCH_AI) {
ljam();
jam();
terrorCode = ZTOO_MUCH_ATTRINFO_ERROR;
} else if (get_trans_state(operPtr.p) == TRANS_ERROR_WAIT_TUPKEYREQ) {
ljam();
jam();
terrorCode = ZSEIZE_ATTRINBUFREC_ERROR;
} else {
ndbrequire(false);
}//if
break;
case 40:
ljam();
jam();
terrorCode = ZUNSUPPORTED_BRANCH;
break;
default:
......
......@@ -14,28 +14,26 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_BUFFER_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#include <signaldata/TransIdAI.hpp>
#define ljam() { jamLine(2000 + __LINE__); }
#define ljamEntry() { jamEntryLine(2000 + __LINE__); }
void Dbtup::execSEND_PACKED(Signal* signal)
{
Uint16 hostId;
Uint32 i;
Uint32 TpackedListIndex= cpackedListIndex;
ljamEntry();
jamEntry();
for (i= 0; i < TpackedListIndex; i++) {
ljam();
jam();
hostId= cpackedList[i];
ndbrequire((hostId - 1) < (MAX_NODES - 1)); // Also check not zero
Uint32 TpacketTA= hostBuffer[hostId].noOfPacketsTA;
if (TpacketTA != 0) {
ljam();
jam();
BlockReference TBref= numberToRef(API_PACKED, hostId);
Uint32 TpacketLen= hostBuffer[hostId].packetLenTA;
MEMCOPY_NO_WORDS(&signal->theData[0],
......@@ -73,7 +71,7 @@ void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef,
// There is still space in the buffer. We will copy it into the
// buffer.
// ----------------------------------------------------------------
ljam();
jam();
updatePackedList(signal, hostId);
} else if (false && TnoOfPackets == 1) {
// ----------------------------------------------------------------
......@@ -118,7 +116,7 @@ void Dbtup::updatePackedList(Signal* signal, Uint16 hostId)
{
if (hostBuffer[hostId].inPackedList == false) {
Uint32 TpackedListIndex= cpackedListIndex;
ljam();
jam();
hostBuffer[hostId].inPackedList= true;
cpackedList[TpackedListIndex]= hostId;
cpackedListIndex= TpackedListIndex + 1;
......@@ -149,7 +147,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){
// Use error insert to turn routing on
ljam();
jam();
connectedToNode= false;
}
......@@ -167,18 +165,18 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
* Own node -> execute direct
*/
if(nodeId != getOwnNodeId()){
ljam();
jam();
/**
* Send long sig
*/
if (ToutBufIndex >= 22 && is_api && !old_dest) {
ljam();
jam();
/**
* Flush buffer so that order is maintained
*/
if (TpacketTA != 0) {
ljam();
jam();
BlockReference TBref = numberToRef(API_PACKED, nodeId);
MEMCOPY_NO_WORDS(&signal->theData[0],
&hostBuffer[nodeId].packetBufferTA[0],
......@@ -202,7 +200,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
*/
#ifndef NDB_NO_DROPPED_SIGNAL
if (ToutBufIndex < 22 && is_api){
ljam();
jam();
bufferTRANSID_AI(signal, recBlockref, 3+ToutBufIndex);
return;
}
......@@ -214,7 +212,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
Uint32 * src= signal->theData+25;
if (ToutBufIndex >= 22){
do {
ljam();
jam();
MEMCOPY_NO_WORDS(&signal->theData[3], src, 22);
sendSignal(recBlockref, GSN_TRANSID_AI, signal, 25, JBB);
ToutBufIndex -= 22;
......@@ -223,14 +221,14 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
}
if (ToutBufIndex > 0){
ljam();
jam();
MEMCOPY_NO_WORDS(&signal->theData[3], src, ToutBufIndex);
sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3+ToutBufIndex, JBB);
}
return;
}
EXECUTE_DIRECT(block, GSN_TRANSID_AI, signal, 3 + ToutBufIndex);
ljamEntry();
jamEntry();
return;
}
......@@ -242,7 +240,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
Uint32 routeBlockref= req_struct->TC_ref;
if (true){ // TODO is_api && !old_dest){
ljam();
jam();
transIdAI->attrData[0]= recBlockref;
LinearSectionPtr ptr[3];
ptr[0].p= &signal->theData[25];
......@@ -260,7 +258,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
Uint32 sent= 0;
Uint32 maxLen= TransIdAI::DataLength - 1;
while (sent < tot) {
ljam();
jam();
Uint32 dataLen= (tot - sent > maxLen) ? maxLen : tot - sent;
Uint32 sigLen= dataLen + TransIdAI::HeaderLength + 1;
MEMCOPY_NO_WORDS(&transIdAI->attrData,
......
......@@ -14,6 +14,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_COMMIT_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
......@@ -21,16 +22,13 @@
#include <signaldata/TupCommit.hpp>
#include "../dblqh/Dblqh.hpp"
#define ljam() { jamLine(5000 + __LINE__); }
#define ljamEntry() { jamEntryLine(5000 + __LINE__); }
void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
{
TablerecPtr regTabPtr;
FragrecordPtr regFragPtr;
Uint32 frag_page_id, frag_id;
ljamEntry();
jamEntry();
frag_id= signal->theData[0];
regTabPtr.i= signal->theData[1];
......@@ -62,7 +60,7 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
if (regTabPtr.p->m_attributes[MM].m_no_of_varsize)
{
ljam();
jam();
free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr);
} else {
free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p);
......@@ -78,7 +76,7 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal)
Uint32 gci= signal->theData[1];
c_operation_pool.getPtr(loopOpPtr);
while (loopOpPtr.p->prevActiveOp != RNIL) {
ljam();
jam();
loopOpPtr.i= loopOpPtr.p->prevActiveOp;
c_operation_pool.getPtr(loopOpPtr);
}
......@@ -87,11 +85,11 @@ void Dbtup::execTUP_WRITELOG_REQ(Signal* signal)
signal->theData[0]= loopOpPtr.p->userpointer;
signal->theData[1]= gci;
if (loopOpPtr.p->nextActiveOp == RNIL) {
ljam();
jam();
EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2);
return;
}
ljam();
jam();
EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2);
jamEntry();
loopOpPtr.i= loopOpPtr.p->nextActiveOp;
......@@ -114,16 +112,16 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr,
if (regOperPtr->op_struct.in_active_list) {
regOperPtr->op_struct.in_active_list= false;
if (regOperPtr->nextActiveOp != RNIL) {
ljam();
jam();
raoOperPtr.i= regOperPtr->nextActiveOp;
c_operation_pool.getPtr(raoOperPtr);
raoOperPtr.p->prevActiveOp= regOperPtr->prevActiveOp;
} else {
ljam();
jam();
tuple_ptr->m_operation_ptr_i = regOperPtr->prevActiveOp;
}
if (regOperPtr->prevActiveOp != RNIL) {
ljam();
jam();
raoOperPtr.i= regOperPtr->prevActiveOp;
c_operation_pool.getPtr(raoOperPtr);
raoOperPtr.p->nextActiveOp= regOperPtr->nextActiveOp;
......@@ -343,7 +341,7 @@ Dbtup::disk_page_commit_callback(Signal* signal,
Uint32 gci;
OperationrecPtr regOperPtr;
ljamEntry();
jamEntry();
c_operation_pool.getPtr(regOperPtr, opPtrI);
c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci);
......@@ -379,7 +377,7 @@ Dbtup::disk_page_log_buffer_callback(Signal* signal,
Uint32 gci;
OperationrecPtr regOperPtr;
ljamEntry();
jamEntry();
c_operation_pool.getPtr(regOperPtr, opPtrI);
c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci);
......@@ -447,7 +445,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr();
regOperPtr.i= tupCommitReq->opPtr;
ljamEntry();
jamEntry();
c_operation_pool.getPtr(regOperPtr);
if(!regOperPtr.p->is_first_operation())
......@@ -603,7 +601,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
* why can't we instead remove "own version" (when appropriate, of course)
*/
if (!regTabPtr.p->tuxCustomTriggers.isEmpty()) {
ljam();
jam();
OperationrecPtr loopPtr= regOperPtr;
while(loopPtr.i != RNIL)
{
......@@ -656,18 +654,18 @@ Dbtup::set_change_mask_info(KeyReqStruct * const req_struct,
{
ChangeMaskState state = get_change_mask_state(regOperPtr);
if (state == USE_SAVED_CHANGE_MASK) {
ljam();
jam();
req_struct->changeMask.setWord(0, regOperPtr->saved_change_mask[0]);
req_struct->changeMask.setWord(1, regOperPtr->saved_change_mask[1]);
} else if (state == RECALCULATE_CHANGE_MASK) {
ljam();
jam();
// Recompute change mask, for now set all bits
req_struct->changeMask.set();
} else if (state == SET_ALL_MASK) {
ljam();
jam();
req_struct->changeMask.set();
} else {
ljam();
jam();
ndbrequire(state == DELETE_CHANGES);
req_struct->changeMask.set();
}
......@@ -687,17 +685,17 @@ Dbtup::calculateChangeMask(Page* const pagePtr,
ndbrequire(loopOpPtr.p->op_struct.op_type == ZUPDATE);
ChangeMaskState change_mask= get_change_mask_state(loopOpPtr.p);
if (change_mask == USE_SAVED_CHANGE_MASK) {
ljam();
jam();
saved_word1|= loopOpPtr.p->saved_change_mask[0];
saved_word2|= loopOpPtr.p->saved_change_mask[1];
} else if (change_mask == RECALCULATE_CHANGE_MASK) {
ljam();
jam();
//Recompute change mask, for now set all bits
req_struct->changeMask.set();
return;
} else {
ndbrequire(change_mask == SET_ALL_MASK);
ljam();
jam();
req_struct->changeMask.set();
return;
}
......
......@@ -15,6 +15,7 @@
#define DBTUP_C
#define DBTUP_DEBUG_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
......@@ -24,9 +25,6 @@
#include <signaldata/EventReport.hpp>
#include <Vector.hpp>
#define ljam() { jamLine(30000 + __LINE__); }
#define ljamEntry() { jamEntryLine(30000 + __LINE__); }
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* ------------------------ DEBUG MODULE -------------------------- */
......@@ -35,7 +33,7 @@
void Dbtup::execDEBUG_SIG(Signal* signal)
{
PagePtr regPagePtr;
ljamEntry();
jamEntry();
regPagePtr.i = signal->theData[0];
c_page_pool.getPtr(regPagePtr);
}//Dbtup::execDEBUG_SIG()
......@@ -248,18 +246,18 @@ void Dbtup::execMEMCHECKREQ(Signal* signal)
PagePtr regPagePtr;
Uint32* data = &signal->theData[0];
ljamEntry();
jamEntry();
BlockReference blockref = signal->theData[0];
Uint32 i;
for (i = 0; i < 25; i++) {
ljam();
jam();
data[i] = 0;
}//for
for (i = 0; i < 16; i++) {
regPagePtr.i = cfreepageList[i];
ljam();
jam();
while (regPagePtr.i != RNIL) {
ljam();
jam();
ptrCheckGuard(regPagePtr, cnoOfPage, cpage);
regPagePtr.i = regPagePtr.p->next_page;
data[0]++;
......
......@@ -14,6 +14,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_DISK_ALLOC_CPP
#include "Dbtup.hpp"
static bool f_undo_done = true;
......
......@@ -14,14 +14,12 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_FIXALLOC_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#define ljam() { jamLine(6000 + __LINE__); }
#define ljamEntry() { jamEntryLine(6000 + __LINE__); }
//
// Fixed Allocator
// This module is used to allocate and free fixed size tuples from the
......@@ -79,7 +77,7 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr,
/* ---------------------------------------------------------------- */
pagePtr.i = getEmptyPage(regFragPtr);
if (pagePtr.i != RNIL) {
ljam();
jam();
/* ---------------------------------------------------------------- */
// We found empty pages on the fragment. Allocate an empty page and
// convert it into a tuple header page and put it in thFreeFirst-list.
......@@ -95,14 +93,14 @@ Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr,
LocalDLList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst);
free_pages.add(pagePtr);
} else {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* THERE ARE NO EMPTY PAGES. MEMORY CAN NOT BE ALLOCATED. */
/* ---------------------------------------------------------------- */
return 0;
}
} else {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* THIS SHOULD BE THE COMMON PATH THROUGH THE CODE, FREE */
/* COPY PAGE EXISTED. */
......@@ -194,7 +192,7 @@ void Dbtup::free_fix_rec(Fragrecord* regFragPtr,
if(free == 1)
{
ljam();
jam();
PagePtr pagePtr = { (Page*)regPagePtr, key->m_page_no };
LocalDLList<Page> free_pages(c_page_pool, regFragPtr->thFreeFirst);
ndbrequire(regPagePtr->page_state == ZTH_MM_FULL);
......
......@@ -15,6 +15,7 @@
#define DBTUP_C
#define DBTUP_GEN_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
......@@ -34,9 +35,6 @@
#define DEBUG(x) { ndbout << "TUP::" << x << endl; }
#define ljam() { jamLine(24000 + __LINE__); }
#define ljamEntry() { jamEntryLine(24000 + __LINE__); }
void Dbtup::initData()
{
cnoOfAttrbufrec = ZNO_OF_ATTRBUFREC;
......@@ -152,21 +150,21 @@ BLOCK_FUNCTIONS(Dbtup)
void Dbtup::execCONTINUEB(Signal* signal)
{
ljamEntry();
jamEntry();
Uint32 actionType = signal->theData[0];
Uint32 dataPtr = signal->theData[1];
switch (actionType) {
case ZINITIALISE_RECORDS:
ljam();
jam();
initialiseRecordsLab(signal, dataPtr,
signal->theData[2], signal->theData[3]);
break;
case ZREL_FRAG:
ljam();
jam();
releaseFragment(signal, dataPtr, signal->theData[2]);
break;
case ZREPORT_MEMORY_USAGE:{
ljam();
jam();
static int c_currentMemUsed = 0;
Uint32 cnt = signal->theData[1];
Uint32 tmp = c_page_pool.getSize();
......@@ -201,11 +199,11 @@ void Dbtup::execCONTINUEB(Signal* signal)
return;
}
case ZBUILD_INDEX:
ljam();
jam();
buildIndex(signal, dataPtr);
break;
case ZTUP_SCAN:
ljam();
jam();
{
ScanOpPtr scanPtr;
c_scanOpPool.getPtr(scanPtr, dataPtr);
......@@ -214,7 +212,7 @@ void Dbtup::execCONTINUEB(Signal* signal)
return;
case ZFREE_EXTENT:
{
ljam();
jam();
TablerecPtr tabPtr;
tabPtr.i= dataPtr;
......@@ -227,7 +225,7 @@ void Dbtup::execCONTINUEB(Signal* signal)
}
case ZUNMAP_PAGES:
{
ljam();
jam();
TablerecPtr tabPtr;
tabPtr.i= dataPtr;
......@@ -240,7 +238,7 @@ void Dbtup::execCONTINUEB(Signal* signal)
}
case ZFREE_VAR_PAGES:
{
ljam();
jam();
drop_fragment_free_var_pages(signal);
return;
}
......@@ -257,12 +255,12 @@ void Dbtup::execCONTINUEB(Signal* signal)
/* **************************************************************** */
void Dbtup::execSTTOR(Signal* signal)
{
ljamEntry();
jamEntry();
Uint32 startPhase = signal->theData[1];
Uint32 sigKey = signal->theData[6];
switch (startPhase) {
case ZSTARTPHASE1:
ljam();
jam();
CLEAR_ERROR_INSERT_VALUE;
ndbrequire((c_lqh= (Dblqh*)globalData.getBlock(DBLQH)) != 0);
ndbrequire((c_tsman= (Tsman*)globalData.getBlock(TSMAN)) != 0);
......@@ -270,7 +268,7 @@ void Dbtup::execSTTOR(Signal* signal)
cownref = calcTupBlockRef(0);
break;
default:
ljam();
jam();
break;
}//switch
signal->theData[0] = sigKey;
......@@ -293,7 +291,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
Uint32 senderData = req->senderData;
ndbrequire(req->noOfParameters == 0);
ljamEntry();
jamEntry();
const ndb_mgm_configuration_iterator * p =
m_ctx.m_config.getOwnConfigIterator();
......@@ -413,58 +411,58 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData,
{
switch (switchData) {
case 0:
ljam();
jam();
initializeHostBuffer();
break;
case 1:
ljam();
jam();
initializeOperationrec();
break;
case 2:
ljam();
jam();
initializePage();
break;
case 3:
ljam();
jam();
break;
case 4:
ljam();
jam();
initializeTablerec();
break;
case 5:
ljam();
jam();
break;
case 6:
ljam();
jam();
initializeFragrecord();
break;
case 7:
ljam();
jam();
initializeFragoperrec();
break;
case 8:
ljam();
jam();
initializePageRange();
break;
case 9:
ljam();
jam();
initializeTabDescr();
break;
case 10:
ljam();
jam();
break;
case 11:
ljam();
jam();
break;
case 12:
ljam();
jam();
initializeAttrbufrec();
break;
case 13:
ljam();
jam();
break;
case 14:
ljam();
jam();
{
ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
......@@ -488,28 +486,28 @@ void Dbtup::initialiseRecordsLab(Signal* signal, Uint32 switchData,
void Dbtup::execNDB_STTOR(Signal* signal)
{
ljamEntry();
jamEntry();
cndbcntrRef = signal->theData[0];
Uint32 ownNodeId = signal->theData[1];
Uint32 startPhase = signal->theData[2];
switch (startPhase) {
case ZSTARTPHASE1:
ljam();
jam();
cownNodeId = ownNodeId;
cownref = calcTupBlockRef(ownNodeId);
break;
case ZSTARTPHASE2:
ljam();
jam();
break;
case ZSTARTPHASE3:
ljam();
jam();
startphase3Lab(signal, ~0, ~0);
break;
case ZSTARTPHASE4:
ljam();
jam();
break;
case ZSTARTPHASE6:
ljam();
jam();
/*****************************************/
/* NOW SET THE DISK WRITE SPEED TO */
/* PAGES PER TICK AFTER SYSTEM */
......@@ -520,7 +518,7 @@ void Dbtup::execNDB_STTOR(Signal* signal)
sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1);
break;
default:
ljam();
jam();
break;
}//switch
signal->theData[0] = cownref;
......@@ -597,7 +595,7 @@ void Dbtup::initializeTablerec()
{
TablerecPtr regTabPtr;
for (regTabPtr.i = 0; regTabPtr.i < cnoOfTablerec; regTabPtr.i++) {
ljam();
jam();
refresh_watch_dog();
ptrAss(regTabPtr, tablerec);
initTab(regTabPtr.p);
......@@ -668,12 +666,12 @@ void Dbtup::initializeTabDescr()
void Dbtup::execTUPSEIZEREQ(Signal* signal)
{
OperationrecPtr regOperPtr;
ljamEntry();
jamEntry();
Uint32 userPtr = signal->theData[0];
BlockReference userRef = signal->theData[1];
if (!c_operation_pool.seize(regOperPtr))
{
ljam();
jam();
signal->theData[0] = userPtr;
signal->theData[1] = ZGET_OPREC_ERROR;
sendSignal(userRef, GSN_TUPSEIZEREF, signal, 2, JBB);
......@@ -707,7 +705,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
void Dbtup::execTUPRELEASEREQ(Signal* signal)
{
OperationrecPtr regOperPtr;
ljamEntry();
jamEntry();
regOperPtr.i = signal->theData[0];
c_operation_pool.getPtr(regOperPtr);
set_trans_state(regOperPtr.p, TRANS_DISCONNECTED);
......
......@@ -14,6 +14,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_INDEX_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
......@@ -23,9 +24,6 @@
#include <AttributeHeader.hpp>
#include <signaldata/TuxMaint.hpp>
#define ljam() { jamLine(28000 + __LINE__); }
#define ljamEntry() { jamEntryLine(28000 + __LINE__); }
// methods used by ordered index
void
......@@ -34,7 +32,7 @@ Dbtup::tuxGetTupAddr(Uint32 fragPtrI,
Uint32 pageIndex,
Uint32& tupAddr)
{
ljamEntry();
jamEntry();
PagePtr pagePtr;
c_page_pool.getPtr(pagePtr, pageId);
Uint32 fragPageId= pagePtr.p->frag_page_id;
......@@ -48,7 +46,7 @@ Dbtup::tuxAllocNode(Signal* signal,
Uint32& pageOffset,
Uint32*& node)
{
ljamEntry();
jamEntry();
FragrecordPtr fragPtr;
fragPtr.i= fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
......@@ -61,7 +59,7 @@ Dbtup::tuxAllocNode(Signal* signal,
Uint32* ptr, frag_page_id;
if ((ptr= alloc_fix_rec(fragPtr.p, tablePtr.p, &key, &frag_page_id)) == 0)
{
ljam();
jam();
terrorCode = ZMEM_NOMEM_ERROR; // caller sets error
return terrorCode;
}
......@@ -82,7 +80,7 @@ Dbtup::tuxFreeNode(Signal* signal,
Uint32 pageOffset,
Uint32* node)
{
ljamEntry();
jamEntry();
FragrecordPtr fragPtr;
fragPtr.i= fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
......@@ -105,7 +103,7 @@ Dbtup::tuxGetNode(Uint32 fragPtrI,
Uint32 pageOffset,
Uint32*& node)
{
ljamEntry();
jamEntry();
FragrecordPtr fragPtr;
fragPtr.i= fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
......@@ -130,7 +128,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI,
Uint32 numAttrs,
Uint32* dataOut)
{
ljamEntry();
jamEntry();
// use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i= fragPtrI;
......@@ -150,21 +148,21 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI,
Tuple_header *tuple_ptr= req_struct.m_tuple_ptr;
if (tuple_ptr->get_tuple_version() != tupVersion)
{
ljam();
jam();
OperationrecPtr opPtr;
opPtr.i= tuple_ptr->m_operation_ptr_i;
Uint32 loopGuard= 0;
while (opPtr.i != RNIL) {
c_operation_pool.getPtr(opPtr);
if (opPtr.p->tupVersion == tupVersion) {
ljam();
jam();
if (!opPtr.p->m_copy_tuple_location.isNull()) {
req_struct.m_tuple_ptr= (Tuple_header*)
c_undo_buffer.get_ptr(&opPtr.p->m_copy_tuple_location);
}
break;
}
ljam();
jam();
opPtr.i= opPtr.p->prevActiveOp;
ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
}
......@@ -202,7 +200,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI,
int
Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
{
ljamEntry();
jamEntry();
// use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i= fragPtrI;
......@@ -305,7 +303,7 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataO
int
Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
{
ljamEntry();
jamEntry();
// get table
TablerecPtr tablePtr;
tablePtr.i = tableId;
......@@ -329,7 +327,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI,
Uint32 transId2,
Uint32 savePointId)
{
ljamEntry();
jamEntry();
FragrecordPtr fragPtr;
fragPtr.i= fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
......@@ -358,9 +356,9 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI,
* for this transaction and savepoint id. If its tuple version
* equals the requested one, we have a visible tuple; otherwise not.
*/
ljam();
jam();
if (req_struct.m_tuple_ptr->get_tuple_version() == tupVersion) {
ljam();
jam();
return true;
}
}
......@@ -378,7 +376,7 @@ Dbtup::tuxQueryTh(Uint32 fragPtrI,
void
Dbtup::execBUILDINDXREQ(Signal* signal)
{
ljamEntry();
jamEntry();
#ifdef TIME_MEASUREMENT
time_events= 0;
tot_time_passed= 0;
......@@ -387,7 +385,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
// get new operation
BuildIndexPtr buildPtr;
if (! c_buildIndexList.seize(buildPtr)) {
ljam();
jam();
BuildIndexRec buildRec;
memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request));
buildRec.m_errorCode= BuildIndxRef::Busy;
......@@ -402,7 +400,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
do {
const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request;
if (buildReq->getTableId() >= cnoOfTablerec) {
ljam();
jam();
buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable;
break;
}
......@@ -410,7 +408,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
tablePtr.i= buildReq->getTableId();
ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
if (tablePtr.p->tableStatus != DEFINED) {
ljam();
jam();
buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable;
break;
}
......@@ -418,7 +416,7 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
buildPtr.p->m_build_vs =
tablePtr.p->m_attributes[MM].m_no_of_varsize > 0;
if (DictTabInfo::isOrderedIndex(buildReq->getIndexType())) {
ljam();
jam();
const DLList<TupTriggerData>& triggerList =
tablePtr.p->tuxCustomTriggers;
......@@ -426,13 +424,13 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
triggerList.first(triggerPtr);
while (triggerPtr.i != RNIL) {
if (triggerPtr.p->indexId == buildReq->getIndexId()) {
ljam();
jam();
break;
}
triggerList.next(triggerPtr);
}
if (triggerPtr.i == RNIL) {
ljam();
jam();
// trigger was not created
buildPtr.p->m_errorCode = BuildIndxRef::InternalError;
break;
......@@ -440,12 +438,12 @@ Dbtup::execBUILDINDXREQ(Signal* signal)
buildPtr.p->m_indexId = buildReq->getIndexId();
buildPtr.p->m_buildRef = DBTUX;
} else if(buildReq->getIndexId() == RNIL) {
ljam();
jam();
// REBUILD of acc
buildPtr.p->m_indexId = RNIL;
buildPtr.p->m_buildRef = DBACC;
} else {
ljam();
jam();
buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType;
break;
}
......@@ -490,7 +488,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
// get fragment
FragrecordPtr fragPtr;
if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) {
ljam();
jam();
// build ready
buildIndexReply(signal, buildPtr.p);
c_buildIndexList.release(buildPtr);
......@@ -499,7 +497,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE);
fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
if (fragPtr.i == RNIL) {
ljam();
jam();
buildPtr.p->m_fragNo++;
buildPtr.p->m_pageId= 0;
buildPtr.p->m_tupleNo= firstTupleNo;
......@@ -509,7 +507,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
// get page
PagePtr pagePtr;
if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) {
ljam();
jam();
buildPtr.p->m_fragNo++;
buildPtr.p->m_pageId= 0;
buildPtr.p->m_tupleNo= firstTupleNo;
......@@ -520,7 +518,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
Uint32 pageState= pagePtr.p->page_state;
// skip empty page
if (pageState == ZEMPTY_MM) {
ljam();
jam();
buildPtr.p->m_pageId++;
buildPtr.p->m_tupleNo= firstTupleNo;
break;
......@@ -530,7 +528,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
const Tuple_header* tuple_ptr = 0;
pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
ljam();
jam();
buildPtr.p->m_pageId++;
buildPtr.p->m_tupleNo= firstTupleNo;
break;
......@@ -538,7 +536,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
// skip over free tuple
if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
ljam();
jam();
buildPtr.p->m_tupleNo++;
break;
}
......@@ -581,7 +579,7 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
tuple as a copy tuple. The original tuple is stable and is thus
preferable to store in TUX.
*/
ljam();
jam();
/**
* Since copy tuples now can't be found on real pages.
......@@ -610,11 +608,11 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
} while(req->errorCode == 0 && pageOperPtr.i != RNIL);
}
ljamEntry();
jamEntry();
if (req->errorCode != 0) {
switch (req->errorCode) {
case TuxMaintReq::NoMemError:
ljam();
jam();
buildPtr.p->m_errorCode= BuildIndxRef::AllocationFailure;
break;
default:
......@@ -666,7 +664,7 @@ Dbtup::buildIndexReply(Signal* signal, const BuildIndexRec* buildPtrP)
rep->setIndexId(buildReq->getIndexId());
// conf
if (buildPtrP->m_errorCode == BuildIndxRef::NoError) {
ljam();
jam();
sendSignal(rep->getUserRef(), GSN_BUILDINDXCONF,
signal, BuildIndxConf::SignalLength, JBB);
return;
......
......@@ -15,6 +15,7 @@
#define DBTUP_C
#define DBTUP_META_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
......@@ -29,16 +30,13 @@
#include "AttributeOffset.hpp"
#include <my_sys.h>
#define ljam() { jamLine(20000 + __LINE__); }
#define ljamEntry() { jamEntryLine(20000 + __LINE__); }
void Dbtup::execTUPFRAGREQ(Signal* signal)
{
ljamEntry();
jamEntry();
TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr();
if (tupFragReq->userPtr == (Uint32)-1) {
ljam();
jam();
abortAddFragOp(signal);
return;
}
......@@ -70,7 +68,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
#ifndef VM_TRACE
// config mismatch - do not crash if release compiled
if (regTabPtr.i >= cnoOfTablerec) {
ljam();
jam();
tupFragReq->userPtr = userptr;
tupFragReq->userRef = 800;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
......@@ -80,7 +78,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
if (cfirstfreeFragopr == RNIL) {
ljam();
jam();
tupFragReq->userPtr = userptr;
tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
......@@ -109,29 +107,29 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
getFragmentrec(regFragPtr, fragId, regTabPtr.p);
if (regFragPtr.i != RNIL) {
ljam();
jam();
terrorCode= ZEXIST_FRAG_ERROR;
fragrefuse1Lab(signal, fragOperPtr);
return;
}
if (cfirstfreefrag != RNIL) {
ljam();
jam();
seizeFragrecord(regFragPtr);
} else {
ljam();
jam();
terrorCode= ZFULL_FRAGRECORD_ERROR;
fragrefuse1Lab(signal, fragOperPtr);
return;
}
initFragRange(regFragPtr.p);
if (!addfragtotab(regTabPtr.p, fragId, regFragPtr.i)) {
ljam();
jam();
terrorCode= ZNO_FREE_TAB_ENTRY_ERROR;
fragrefuse2Lab(signal, fragOperPtr, regFragPtr);
return;
}
if (cfirstfreerange == RNIL) {
ljam();
jam();
terrorCode= ZNO_FREE_PAGE_RANGE_ERROR;
fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
return;
......@@ -147,7 +145,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
ljam();
jam();
terrorCode = 1;
fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
CLEAR_ERROR_INSERT_VALUE;
......@@ -155,7 +153,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
}
if (regTabPtr.p->tableStatus == NOT_DEFINED) {
ljam();
jam();
//-----------------------------------------------------------------------------
// We are setting up references to the header of the tuple.
// Active operation This word contains a reference to the operation active
......@@ -201,13 +199,13 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
Uint32 offset[10];
Uint32 tableDescriptorRef= allocTabDescr(regTabPtr.p, offset);
if (tableDescriptorRef == RNIL) {
ljam();
jam();
fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
return;
}
setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset);
} else {
ljam();
jam();
fragOperPtr.p->definingFragment= false;
}
signal->theData[0]= fragOperPtr.p->lqhPtrFrag;
......@@ -223,9 +221,9 @@ bool Dbtup::addfragtotab(Tablerec* const regTabPtr,
Uint32 fragIndex)
{
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
ljam();
jam();
if (regTabPtr->fragid[i] == RNIL) {
ljam();
jam();
regTabPtr->fragid[i]= fragId;
regTabPtr->fragrec[i]= fragIndex;
return true;
......@@ -239,9 +237,9 @@ void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr,
Tablerec* const regTabPtr)
{
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
ljam();
jam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
jam();
regFragPtr.i= regTabPtr->fragrec[i];
ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
return;
......@@ -277,7 +275,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
FragoperrecPtr fragOperPtr;
TablerecPtr regTabPtr;
ljamEntry();
jamEntry();
fragOperPtr.i= signal->theData[0];
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
Uint32 attrId = signal->theData[2];
......@@ -338,7 +336,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
Uint32 attrDes2= 0;
if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
ljam();
jam();
Uint32 pos= 0, null_pos;
Uint32 bytes= AttributeDescriptor::getSizeInBytes(attrDescriptor);
Uint32 words= (bytes + 3) / 4;
......@@ -348,7 +346,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
if (AttributeDescriptor::getNullable(attrDescriptor))
{
ljam();
jam();
fragOperPtr.p->m_null_bits[ind]++;
}
else
......@@ -363,17 +361,17 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
switch (AttributeDescriptor::getArrayType(attrDescriptor)) {
case NDB_ARRAYTYPE_FIXED:
{
ljam();
jam();
regTabPtr.p->m_attributes[ind].m_no_of_fixsize++;
if(attrLen != 0)
{
ljam();
jam();
pos= fragOperPtr.p->m_fix_attributes_size[ind];
fragOperPtr.p->m_fix_attributes_size[ind] += words;
}
else
{
ljam();
jam();
Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
fragOperPtr.p->m_null_bits[ind] += bitCount;
}
......@@ -381,7 +379,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
}
default:
{
ljam();
jam();
fragOperPtr.p->m_var_attributes_size[ind] += bytes;
pos= regTabPtr.p->m_attributes[ind].m_no_of_varsize++;
break;
......@@ -398,13 +396,13 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ndbrequire(cs != NULL);
Uint32 i = 0;
while (i < fragOperPtr.p->charsetIndex) {
ljam();
jam();
if (regTabPtr.p->charsetArray[i] == cs)
break;
i++;
}
if (i == fragOperPtr.p->charsetIndex) {
ljam();
jam();
fragOperPtr.p->charsetIndex++;
}
ndbrequire(i < regTabPtr.p->noOfCharsets);
......@@ -417,7 +415,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr ||
ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0||
ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr) {
ljam();
jam();
terrorCode = 1;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
CLEAR_ERROR_INSERT_VALUE;
......@@ -428,7 +426,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
/* ************** TUP_ADD_ATTCONF ****************** */
/* **************************************************************** */
if (! lastAttr) {
ljam();
jam();
signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
signal->theData[1] = lastAttr;
sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF,
......@@ -554,7 +552,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
if (noAllocatedPages == 0) {
ljam();
jam();
terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
......@@ -564,7 +562,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
CreateFilegroupImplReq rep;
if(regTabPtr.p->m_no_of_disk_attributes)
{
ljam();
jam();
Tablespace_client tsman(0, c_tsman, 0, 0,
regFragPtr.p->m_tablespace_id);
ndbrequire(tsman.get_tablespace_info(&rep) == 0);
......@@ -581,12 +579,12 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
if (regTabPtr.p->m_no_of_disk_attributes)
{
ljam();
jam();
if(!(getNodeState().startLevel == NodeState::SL_STARTING &&
getNodeState().starting.startPhase <= 4))
{
Callback cb;
ljam();
jam();
cb.m_callbackData= fragOperPtr.i;
cb.m_callbackFunction =
......@@ -600,7 +598,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
int res= lgman.get_log_buffer(signal, sz, &cb);
switch(res){
case 0:
ljam();
jam();
signal->theData[0] = 1;
return;
case -1:
......@@ -719,11 +717,11 @@ void Dbtup::setUpKeyArray(Tablerec* const regTabPtr)
Uint32* keyArray= &tableDescriptor[regTabPtr->readKeyArray].tabDescr;
Uint32 countKeyAttr= 0;
for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) {
ljam();
jam();
Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE);
Uint32 attrDescriptor= getTabDescrWord(refAttr);
if (AttributeDescriptor::getPrimaryKey(attrDescriptor)) {
ljam();
jam();
AttributeHeader::init(&keyArray[countKeyAttr], i, 0);
countKeyAttr++;
}
......@@ -743,7 +741,7 @@ void Dbtup::setUpKeyArray(Tablerec* const regTabPtr)
{
for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++)
{
ljam();
jam();
Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE);
Uint32 desc = getTabDescrWord(refAttr);
Uint32 t = 0;
......@@ -838,9 +836,9 @@ void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
ljam();
jam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
jam();
regTabPtr->fragid[i]= RNIL;
regTabPtr->fragrec[i]= RNIL;
return;
......@@ -866,7 +864,7 @@ void Dbtup::abortAddFragOp(Signal* signal)
void
Dbtup::execDROP_TAB_REQ(Signal* signal)
{
ljamEntry();
jamEntry();
if (ERROR_INSERTED(4013)) {
#ifdef VM_TRACE
verifytabdes();
......@@ -892,7 +890,7 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr)
{
Uint32 descriptor= regTabPtr->readKeyArray;
if (descriptor != RNIL) {
ljam();
jam();
Uint32 offset[10];
getTabDescrOffsets(regTabPtr, offset);
......@@ -923,16 +921,16 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
Uint32 fragId = RNIL;
Uint32 i = 0;
for (i = 0; i < MAX_FRAG_PER_NODE; i++) {
ljam();
jam();
if (tabPtr.p->fragid[i] != RNIL) {
ljam();
jam();
fragIndex= tabPtr.p->fragrec[i];
fragId= tabPtr.p->fragid[i];
break;
}
}
if (fragIndex != RNIL) {
ljam();
jam();
signal->theData[0] = ZUNMAP_PAGES;
signal->theData[1] = tabPtr.i;
......@@ -957,7 +955,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
int res= lgman.get_log_buffer(signal, sz, &cb);
switch(res){
case 0:
ljam();
jam();
return;
case -1:
ndbrequire("NOT YET IMPLEMENTED" == 0);
......@@ -1088,7 +1086,7 @@ Dbtup::drop_fragment_free_extent(Signal *signal,
int res= lgman.get_log_buffer(signal, sz, &cb);
switch(res){
case 0:
ljam();
jam();
return;
case -1:
ndbrequire("NOT YET IMPLEMENTED" == 0);
......@@ -1239,7 +1237,7 @@ Dbtup::drop_fragment_free_extent_log_buffer_callback(Signal* signal,
void
Dbtup::drop_fragment_free_var_pages(Signal* signal)
{
ljam();
jam();
Uint32 tableId = signal->theData[1];
Uint32 fragPtrI = signal->theData[2];
......
......@@ -14,14 +14,12 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_PAG_MAN_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#define ljam() { jamLine(16000 + __LINE__); }
#define ljamEntry() { jamEntryLine(16000 + __LINE__); }
/* ---------------------------------------------------------------- */
// 4) Page Memory Manager (buddy algorithm)
//
......@@ -121,7 +119,7 @@ void Dbtup::initializePage()
}//for
PagePtr pagePtr;
for (pagePtr.i = 0; pagePtr.i < c_page_pool.getSize(); pagePtr.i++) {
ljam();
jam();
refresh_watch_dog();
c_page_pool.getPtr(pagePtr);
pagePtr.p->physical_page_id= RNIL;
......@@ -153,16 +151,16 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
Uint32& allocPageRef)
{
if (noOfPagesToAllocate == 0){
ljam();
jam();
noOfPagesAllocated = 0;
return;
}//if
Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1);
for (Uint32 i = firstListToCheck; i < 16; i++) {
ljam();
jam();
if (cfreepageList[i] != RNIL) {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* PROPER AMOUNT OF PAGES WERE FOUND. NOW SPLIT THE FOUND */
/* AREA AND RETURN THE PART NOT NEEDED. */
......@@ -182,11 +180,11 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
/* ---------------------------------------------------------------- */
if (firstListToCheck)
{
ljam();
jam();
for (Uint32 j = firstListToCheck - 1; (Uint32)~j; j--) {
ljam();
jam();
if (cfreepageList[j] != RNIL) {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* SOME AREA WAS FOUND, ALLOCATE ALL OF IT. */
/* ---------------------------------------------------------------- */
......@@ -212,9 +210,9 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
void Dbtup::returnCommonArea(Uint32 retPageRef, Uint32 retNo)
{
do {
ljam();
jam();
if (retNo == 0) {
ljam();
jam();
return;
}//if
Uint32 list = nextHigherTwoLog(retNo) - 1;
......@@ -231,28 +229,28 @@ void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef,
PagePtr pageFirstPtr, pageLastPtr;
Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
while (allocPageRef > 0) {
ljam();
jam();
pageLastPtr.i = allocPageRef - 1;
c_page_pool.getPtr(pageLastPtr);
if (pageLastPtr.p->page_state != ZFREE_COMMON) {
ljam();
jam();
return;
} else {
ljam();
jam();
pageFirstPtr.i = pageLastPtr.p->first_cluster_page;
ndbrequire(pageFirstPtr.i != RNIL);
Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i);
removeCommonArea(pageFirstPtr.i, list);
Uint32 listSize = 1 << list;
if (listSize > remainAllocate) {
ljam();
jam();
Uint32 retNo = listSize - remainAllocate;
returnCommonArea(pageFirstPtr.i, retNo);
allocPageRef = pageFirstPtr.i + retNo;
noPagesAllocated = noOfPagesToAllocate;
return;
} else {
ljam();
jam();
allocPageRef = pageFirstPtr.i;
noPagesAllocated += listSize;
remainAllocate -= listSize;
......@@ -268,32 +266,32 @@ void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef,
PagePtr pageFirstPtr, pageLastPtr;
Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated;
if (remainAllocate == 0) {
ljam();
jam();
return;
}//if
while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize()) {
ljam();
jam();
pageFirstPtr.i = allocPageRef + noPagesAllocated;
c_page_pool.getPtr(pageFirstPtr);
if (pageFirstPtr.p->page_state != ZFREE_COMMON) {
ljam();
jam();
return;
} else {
ljam();
jam();
pageLastPtr.i = pageFirstPtr.p->last_cluster_page;
ndbrequire(pageLastPtr.i != RNIL);
Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i);
removeCommonArea(pageFirstPtr.i, list);
Uint32 listSize = 1 << list;
if (listSize > remainAllocate) {
ljam();
jam();
Uint32 retPageRef = pageFirstPtr.i + remainAllocate;
Uint32 retNo = listSize - remainAllocate;
returnCommonArea(retPageRef, retNo);
noPagesAllocated += remainAllocate;
return;
} else {
ljam();
jam();
noPagesAllocated += listSize;
remainAllocate -= listSize;
}//if
......@@ -328,30 +326,30 @@ void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list)
c_page_pool.getPtr(remPagePtr, remPageRef);
ndbrequire(list < 16);
if (cfreepageList[list] == remPagePtr.i) {
ljam();
jam();
cfreepageList[list] = remPagePtr.p->next_cluster_page;
pageNextPtr.i = cfreepageList[list];
if (pageNextPtr.i != RNIL) {
ljam();
jam();
c_page_pool.getPtr(pageNextPtr);
pageNextPtr.p->prev_cluster_page = RNIL;
}//if
} else {
pageSearchPtr.i = cfreepageList[list];
while (true) {
ljam();
jam();
c_page_pool.getPtr(pageSearchPtr);
pagePrevPtr = pageSearchPtr;
pageSearchPtr.i = pageSearchPtr.p->next_cluster_page;
if (pageSearchPtr.i == remPagePtr.i) {
ljam();
jam();
break;
}//if
}//while
pageNextPtr.i = remPagePtr.p->next_cluster_page;
pagePrevPtr.p->next_cluster_page = pageNextPtr.i;
if (pageNextPtr.i != RNIL) {
ljam();
jam();
c_page_pool.getPtr(pageNextPtr);
pageNextPtr.p->prev_cluster_page = pagePrevPtr.i;
}//if
......
......@@ -15,14 +15,12 @@
#define DBTUP_C
#define DBTUP_PAGE_MAP_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#define ljam() { jamLine(14000 + __LINE__); }
#define ljamEntry() { jamEntryLine(14000 + __LINE__); }
//
// PageMap is a service used by Dbtup to map logical page id's to physical
// page id's. The mapping needs the fragment and the logical page id to
......@@ -92,11 +90,11 @@ Uint32 Dbtup::getEmptyPage(Fragrecord* regFragPtr)
{
Uint32 pageId = regFragPtr->emptyPrimPage.firstItem;
if (pageId == RNIL) {
ljam();
jam();
allocMoreFragPages(regFragPtr);
pageId = regFragPtr->emptyPrimPage.firstItem;
if (pageId == RNIL) {
ljam();
jam();
return RNIL;
}//if
}//if
......@@ -122,11 +120,11 @@ Uint32 Dbtup::getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId)
loopLimit = grpPageRangePtr.p->currentIndexPos;
ndbrequire(loopLimit <= 3);
for (Uint32 i = 0; i <= loopLimit; i++) {
ljam();
jam();
if (grpPageRangePtr.p->startRange[i] <= logicalPageId) {
if (grpPageRangePtr.p->endRange[i] >= logicalPageId) {
if (grpPageRangePtr.p->type[i] == ZLEAF) {
ljam();
jam();
Uint32 realPageId = (logicalPageId - grpPageRangePtr.p->startRange[i]) +
grpPageRangePtr.p->basePageId[i];
return realPageId;
......@@ -167,12 +165,12 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
{
PageRangePtr currPageRangePtr;
if (cfirstfreerange == RNIL) {
ljam();
jam();
return false;
}//if
currPageRangePtr.i = regFragPtr->currentPageRange;
if (currPageRangePtr.i == RNIL) {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* THE FIRST PAGE RANGE IS HANDLED WITH SPECIAL CODE */
/* ---------------------------------------------------------------- */
......@@ -181,10 +179,10 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
currPageRangePtr.p->currentIndexPos = 0;
currPageRangePtr.p->parentPtr = RNIL;
} else {
ljam();
jam();
ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange);
if (currPageRangePtr.p->currentIndexPos < 3) {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* THE SIMPLE CASE WHEN IT IS ONLY NECESSARY TO FILL IN THE */
/* NEXT EMPTY POSITION IN THE PAGE RANGE RECORD IS TREATED */
......@@ -192,7 +190,7 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
/* ---------------------------------------------------------------- */
currPageRangePtr.p->currentIndexPos++;
} else {
ljam();
jam();
ndbrequire(currPageRangePtr.p->currentIndexPos == 3);
currPageRangePtr.i = leafPageRangeFull(regFragPtr, currPageRangePtr);
if (currPageRangePtr.i == RNIL) {
......@@ -223,15 +221,15 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
PageRangePtr loopPageRangePtr;
loopPageRangePtr = currPageRangePtr;
while (true) {
ljam();
jam();
loopPageRangePtr.i = loopPageRangePtr.p->parentPtr;
if (loopPageRangePtr.i != RNIL) {
ljam();
jam();
ptrCheckGuard(loopPageRangePtr, cnoOfPageRangeRec, pageRange);
ndbrequire(loopPageRangePtr.p->currentIndexPos < 4);
loopPageRangePtr.p->endRange[loopPageRangePtr.p->currentIndexPos] += noPages;
} else {
ljam();
jam();
break;
}//if
}//while
......@@ -243,26 +241,26 @@ bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr,
void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
{
if (regFragPtr->rootPageRange == RNIL) {
ljam();
jam();
return;
}//if
PageRangePtr regPRPtr;
regPRPtr.i = regFragPtr->rootPageRange;
ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
while (true) {
ljam();
jam();
const Uint32 indexPos = regPRPtr.p->currentIndexPos;
ndbrequire(indexPos < 4);
const Uint32 basePageId = regPRPtr.p->basePageId[indexPos];
regPRPtr.p->basePageId[indexPos] = RNIL;
if (basePageId == RNIL) {
ljam();
jam();
/**
* Finished with indexPos continue with next
*/
if (indexPos > 0) {
ljam();
jam();
regPRPtr.p->currentIndexPos--;
continue;
}//if
......@@ -274,13 +272,13 @@ void Dbtup::releaseFragPages(Fragrecord* regFragPtr)
releasePagerange(regPRPtr);
if (parentPtr != RNIL) {
ljam();
jam();
regPRPtr.i = parentPtr;
ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange);
continue;
}//if
ljam();
jam();
ndbrequire(regPRPtr.i == regFragPtr->rootPageRange);
initFragRange(regFragPtr);
for (Uint32 i = 0; i<MAX_FREE_LIST; i++)
......@@ -364,7 +362,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
Uint32 retPageRef = RNIL;
allocConsPages(noPagesToAllocate, noOfPagesAllocated, retPageRef);
if (noOfPagesAllocated == 0) {
ljam();
jam();
return tafpPagesAllocated;
}//if
/* ---------------------------------------------------------------- */
......@@ -373,7 +371,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
/* ---------------------------------------------------------------- */
Uint32 startRange = regFragPtr->nextStartRange;
if (!insertPageRangeTab(regFragPtr, retPageRef, noOfPagesAllocated)) {
ljam();
jam();
returnCommonArea(retPageRef, noOfPagesAllocated);
return tafpPagesAllocated;
}//if
......@@ -388,7 +386,7 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
/* ---------------------------------------------------------------- */
Uint32 prev = RNIL;
for (loopPagePtr.i = retPageRef; loopPagePtr.i < loopLimit; loopPagePtr.i++) {
ljam();
jam();
c_page_pool.getPtr(loopPagePtr);
loopPagePtr.p->page_state = ZEMPTY_MM;
loopPagePtr.p->frag_page_id = startRange +
......@@ -416,10 +414,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested
/* WAS ENOUGH PAGES ALLOCATED OR ARE MORE NEEDED. */
/* ---------------------------------------------------------------- */
if (tafpPagesAllocated < tafpNoAllocRequested) {
ljam();
jam();
} else {
ndbrequire(tafpPagesAllocated == tafpNoAllocRequested);
ljam();
jam();
return tafpNoAllocRequested;
}//if
}//while
......@@ -451,15 +449,15 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
parentPageRangePtr = currPageRangePtr;
Uint32 tiprNoLevels = 1;
while (true) {
ljam();
jam();
parentPageRangePtr.i = parentPageRangePtr.p->parentPtr;
if (parentPageRangePtr.i == RNIL) {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* WE HAVE REACHED THE ROOT. A NEW ROOT MUST BE ALLOCATED. */
/* ---------------------------------------------------------------- */
if (c_noOfFreePageRanges < tiprNoLevels) {
ljam();
jam();
return RNIL;
}//if
PageRangePtr oldRootPRPtr;
......@@ -482,10 +480,10 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
foundPageRangePtr = newRootPRPtr;
break;
} else {
ljam();
jam();
ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange);
if (parentPageRangePtr.p->currentIndexPos < 3) {
ljam();
jam();
/* ---------------------------------------------------------------- */
/* WE HAVE FOUND AN EMPTY ENTRY IN A PAGE RANGE RECORD. */
/* ALLOCATE A NEW PAGE RANGE RECORD, FILL IN THE START RANGE, */
......@@ -498,7 +496,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
foundPageRangePtr = parentPageRangePtr;
break;
} else {
ljam();
jam();
ndbrequire(parentPageRangePtr.p->currentIndexPos == 3);
/* ---------------------------------------------------------------- */
/* THE PAGE RANGE RECORD WAS FULL. FIND THE PARENT RECORD */
......@@ -516,7 +514,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
PageRangePtr prevPageRangePtr;
prevPageRangePtr = foundPageRangePtr;
if (c_noOfFreePageRanges < tiprNoLevels) {
ljam();
jam();
return RNIL;
}//if
/* ---------------------------------------------------------------- */
......@@ -527,7 +525,7 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
/* ARE ALSO PROPERLY UPDATED ON THE PATH TO THE LEAF LEVEL. */
/* ---------------------------------------------------------------- */
while (true) {
ljam();
jam();
seizePagerange(newPageRangePtr);
tiprNoLevels--;
ndbrequire(prevPageRangePtr.p->currentIndexPos < 4);
......@@ -535,13 +533,13 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
newPageRangePtr.p->parentPtr = prevPageRangePtr.i;
newPageRangePtr.p->currentIndexPos = 0;
if (tiprNoLevels > 0) {
ljam();
jam();
newPageRangePtr.p->startRange[0] = regFragPtr->nextStartRange;
newPageRangePtr.p->endRange[0] = regFragPtr->nextStartRange - 1;
newPageRangePtr.p->type[0] = ZNON_LEAF;
prevPageRangePtr = newPageRangePtr;
} else {
ljam();
jam();
break;
}//if
}//while
......@@ -576,16 +574,16 @@ void Dbtup::errorHandler(Uint32 errorCode)
{
switch (errorCode) {
case 0:
ljam();
jam();
break;
case 1:
ljam();
jam();
break;
case 2:
ljam();
jam();
break;
default:
ljam();
jam();
}
ndbrequire(false);
}//Dbtup::errorHandler()
......@@ -15,6 +15,7 @@
#define DBTUP_C
#define DBTUP_ROUTINES_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
......@@ -23,9 +24,6 @@
#include "AttributeOffset.hpp"
#include <AttributeHeader.hpp>
#define ljam() { jamLine(3000 + __LINE__); }
#define ljamEntry() { jamEntryLine(3000 + __LINE__); }
void
Dbtup::setUpQueryRoutines(Tablerec *regTabPtr)
{
......@@ -40,23 +38,23 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr)
if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){
if (!AttributeDescriptor::getNullable(attrDescr)) {
if (AttributeDescriptor::getSize(attrDescr) == 0){
ljam();
jam();
regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL;
} else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4) {
ljam();
jam();
regTabPtr->readFunctionArray[i]=
&Dbtup::readFixedSizeTHOneWordNotNULL;
regTabPtr->updateFunctionArray[i]=
&Dbtup::updateFixedSizeTHOneWordNotNULL;
} else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) {
ljam();
jam();
regTabPtr->readFunctionArray[i]=
&Dbtup::readFixedSizeTHTwoWordNotNULL;
regTabPtr->updateFunctionArray[i]=
&Dbtup::updateFixedSizeTHTwoWordNotNULL;
} else {
ljam();
jam();
regTabPtr->readFunctionArray[i]=
&Dbtup::readFixedSizeTHManyWordNotNULL;
regTabPtr->updateFunctionArray[i]=
......@@ -64,27 +62,27 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr)
}
// replace functions for char attribute
if (AttributeOffset::getCharsetFlag(attrOffset)) {
ljam();
jam();
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
}
} else {
if (AttributeDescriptor::getSize(attrDescr) == 0){
ljam();
jam();
regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable;
} else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4){
ljam();
jam();
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
} else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) {
ljam();
jam();
regTabPtr->readFunctionArray[i]=
&Dbtup::readFixedSizeTHTwoWordNULLable;
regTabPtr->updateFunctionArray[i]=
&Dbtup::updateFixedSizeTHManyWordNULLable;
} else {
ljam();
jam();
regTabPtr->readFunctionArray[i]=
&Dbtup::readFixedSizeTHManyWordNULLable;
regTabPtr->updateFunctionArray[i]=
......@@ -92,7 +90,7 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr)
}
// replace functions for char attribute
if (AttributeOffset::getCharsetFlag(attrOffset)) {
ljam();
jam();
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
}
......@@ -144,7 +142,7 @@ Dbtup::setUpQueryRoutines(Tablerec *regTabPtr)
}
} else {
if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){
ljam();
jam();
regTabPtr->readFunctionArray[i]= &Dbtup::readDynFixedSize;
regTabPtr->updateFunctionArray[i]= &Dbtup::updateDynFixedSize;
} else {
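
setUpQueryRoutines() above fills per-attribute readFunctionArray/updateFunctionArray slots, so that later reads and updates dispatch through one indexed function pointer instead of re-testing size, nullability and charset for every request. A rough sketch of that dispatch-table pattern, with invented types and a much-simplified reader signature (not the Dbtup ones):

#include <vector>

struct Ctx { /* per-request state, simplified */ };

// Invented, simplified signature; the real readers also take header/out buffers.
using ReadFn = bool (*)(Ctx&);

static bool readOneWordNotNull(Ctx&)  { return true; }
static bool readManyWordNotNull(Ctx&) { return true; }
static bool readNullable(Ctx&)        { return true; }

struct AttrDesc { bool nullable; unsigned sizeInBytes; };

// Choose the specialised routine once, at table definition time.
static ReadFn pickReader(const AttrDesc& d) {
  if (d.nullable)         return readNullable;
  if (d.sizeInBytes == 4) return readOneWordNotNull;
  return readManyWordNotNull;
}

static void setUpReaders(const std::vector<AttrDesc>& attrs,
                         std::vector<ReadFn>& table) {
  table.clear();
  for (const AttrDesc& d : attrs)
    table.push_back(pickReader(d));
}

// At request time: one indexed call per attribute id.
static bool readAttr(const std::vector<ReadFn>& table, unsigned attrId, Ctx& c) {
  return table[attrId](c);
}
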
......@@ -204,7 +202,7 @@ int Dbtup::readAttributes(KeyReqStruct *req_struct,
inBufIndex++;
attributeId= ahIn.getAttributeId();
descr_index= attributeId << ZAD_LOG_SIZE;
ljam();
jam();
AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, 0);
ahOut= (AttributeHeader*)&outBuffer[tmpAttrBufIndex];
......@@ -223,7 +221,7 @@ int Dbtup::readAttributes(KeyReqStruct *req_struct,
return -1;
}
} else if(attributeId & AttributeHeader::PSEUDO) {
ljam();
jam();
Uint32 sz= read_pseudo(attributeId,
req_struct,
outBuffer+tmpAttrBufIndex+1);
......@@ -252,13 +250,13 @@ Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
ndbrequire(readOffset < req_struct->check_offset[MM]);
if (newIndexBuf <= maxRead) {
ljam();
jam();
outBuffer[indexBuf]= wordRead;
ahOut->setDataSize(1);
req_struct->out_buf_index= newIndexBuf;
return true;
} else {
ljam();
jam();
terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}
......@@ -280,14 +278,14 @@ Dbtup::readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer,
ndbrequire(readOffset + 1 < req_struct->check_offset[MM]);
if (newIndexBuf <= maxRead) {
ljam();
jam();
ahOut->setDataSize(2);
outBuffer[indexBuf]= wordReadFirst;
outBuffer[indexBuf + 1]= wordReadSecond;
req_struct->out_buf_index= newIndexBuf;
return true;
} else {
ljam();
jam();
terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}
......@@ -311,7 +309,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
if (! charsetFlag || ! req_struct->xfrm_flag) {
Uint32 newIndexBuf = indexBuf + attrNoOfWords;
if (newIndexBuf <= maxRead) {
ljam();
jam();
ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor));
MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
&tuple_header[readOffset],
......@@ -319,11 +317,11 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
req_struct->out_buf_index = newIndexBuf;
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
}//if
} else {
ljam();
jam();
Tablerec* regTabPtr = tabptr.p;
Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
uchar* dstPtr = (uchar*)&outBuffer[indexBuf];
......@@ -340,7 +338,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
Uint32 dstLen = xmul * (srcBytes - lb);
Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
if (maxIndexBuf <= maxRead && ok) {
ljam();
jam();
const char* ssrcPtr = (const char*)srcPtr;
int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
ndbrequire(n != -1);
......@@ -354,7 +352,7 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
req_struct->out_buf_index = newIndexBuf;
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
}
}
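
readFixedSizeTHManyWordNotNULL() above takes one of two paths: a bounds-checked word copy, or, when the attribute carries a charset and xfrm output is requested, a bounds check followed by NdbSqlUtil::strnxfrm_bug7284 so the stored bytes become binary-comparable sort keys. A condensed sketch of that check-then-copy-or-transform shape, with a hypothetical normalize() standing in for the collation transform:

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for the collation transform (strnxfrm in the source).
static int normalize(unsigned char* dst, unsigned dstLen,
                     const unsigned char* src, unsigned srcLen) {
  unsigned n = srcLen < dstLen ? srcLen : dstLen;
  std::memcpy(dst, src, n);        // the real code produces sort keys, not a copy
  return (int)n;
}

// Copy one fixed-size attribute into the out buffer, refusing to overrun it.
static bool readFixedAttr(uint32_t* out, unsigned& outIndex, unsigned maxWords,
                          const uint32_t* src, unsigned attrWords,
                          bool applyCollation) {
  unsigned newIndex = outIndex + attrWords;
  if (newIndex > maxWords)
    return false;                  // ZTRY_TO_READ_TOO_MUCH_ERROR in the source
  if (!applyCollation) {
    std::memcpy(out + outIndex, src, attrWords * 4);
  } else {
    int n = normalize((unsigned char*)(out + outIndex), attrWords * 4,
                      (const unsigned char*)src, attrWords * 4);
    if (n < 0) return false;
  }
  outIndex = newIndex;
  return true;
}
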
......@@ -368,13 +366,13 @@ Dbtup::readFixedSizeTHOneWordNULLable(Uint32* outBuffer,
Uint32 attrDes2)
{
if (!nullFlagCheck(req_struct, attrDes2)) {
ljam();
jam();
return readFixedSizeTHOneWordNotNULL(outBuffer,
req_struct,
ahOut,
attrDes2);
} else {
ljam();
jam();
ahOut->setNULL();
return true;
}
......@@ -387,13 +385,13 @@ Dbtup::readFixedSizeTHTwoWordNULLable(Uint32* outBuffer,
Uint32 attrDes2)
{
if (!nullFlagCheck(req_struct, attrDes2)) {
ljam();
jam();
return readFixedSizeTHTwoWordNotNULL(outBuffer,
req_struct,
ahOut,
attrDes2);
} else {
ljam();
jam();
ahOut->setNULL();
return true;
}
......@@ -406,13 +404,13 @@ Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
Uint32 attrDes2)
{
if (!nullFlagCheck(req_struct, attrDes2)) {
ljam();
jam();
return readFixedSizeTHManyWordNotNULL(outBuffer,
req_struct,
ahOut,
attrDes2);
} else {
ljam();
jam();
ahOut->setNULL();
return true;
}
......@@ -424,9 +422,9 @@ Dbtup::readFixedSizeTHZeroWordNULLable(Uint32* outBuffer,
AttributeHeader* ahOut,
Uint32 attrDes2)
{
ljam();
jam();
if (nullFlagCheck(req_struct, attrDes2)) {
ljam();
jam();
ahOut->setNULL();
}
return true;
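
Each *NULLable reader above is a thin wrapper: test the null bit, then either delegate to the matching NotNULL routine or emit a NULL attribute header. The shape of that wrapper, with an invented AttrHeader encoding:

#include <cstdint>

struct AttrHeader {
  uint32_t word;
  void setNULL()               { word |= 0x80000000u; }              // invented encoding
  void setDataSize(uint32_t w) { word = (word & 0xFFFF0000u) | w; }  // invented encoding
};

static bool readNotNull(uint32_t* out, AttrHeader& ah, uint32_t value) {
  out[0] = value;
  ah.setDataSize(1);
  return true;
}

// NULLable variant: consult the null bit, then delegate or mark NULL.
static bool readNullable(bool nullBitSet, uint32_t* out, AttrHeader& ah,
                         uint32_t value) {
  if (!nullBitSet)
    return readNotNull(out, ah, value);
  ah.setNULL();
  return true;
}
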
......@@ -478,7 +476,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer,
if (! charsetFlag || ! req_struct->xfrm_flag)
{
if (new_index <= max_read) {
ljam();
jam();
ah_out->setByteSize(vsize_in_bytes);
out_buffer[index_buf + (vsize_in_bytes >> 2)] = 0;
memcpy(out_buffer+index_buf,
......@@ -490,7 +488,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer,
}
else
{
ljam();
jam();
Tablerec* regTabPtr = tabptr.p;
Uint32 maxBytes = AttributeDescriptor::getSizeInBytes(attr_descriptor);
Uint32 srcBytes = vsize_in_bytes;
......@@ -509,7 +507,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer,
Uint32 dstLen = xmul * (maxBytes - lb);
Uint32 maxIndexBuf = index_buf + (dstLen >> 2);
if (maxIndexBuf <= max_read && ok) {
ljam();
jam();
const char* ssrcPtr = (const char*)srcPtr;
int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
ndbrequire(n != -1);
......@@ -524,7 +522,7 @@ Dbtup::readVarSizeNotNULL(Uint32* out_buffer,
return true;
}
}
ljam();
jam();
terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}
......@@ -536,13 +534,13 @@ Dbtup::readVarSizeNULLable(Uint32* outBuffer,
Uint32 attrDes2)
{
if (!nullFlagCheck(req_struct, attrDes2)) {
ljam();
jam();
return readVarSizeNotNULL(outBuffer,
req_struct,
ahOut,
attrDes2);
} else {
ljam();
jam();
ahOut->setNULL();
return true;
}
......@@ -554,7 +552,7 @@ Dbtup::readDynFixedSize(Uint32* outBuffer,
AttributeHeader* ahOut,
Uint32 attrDes2)
{
ljam();
jam();
terrorCode= ZVAR_SIZED_NOT_SUPPORTED;
return false;
}
......@@ -565,7 +563,7 @@ Dbtup::readDynVarSize(Uint32* outBuffer,
AttributeHeader* ahOut,
Uint32 attrDes2)
{
ljam();
jam();
terrorCode= ZVAR_SIZED_NOT_SUPPORTED;
return false;
}//Dbtup::readDynBigVarSize()
......@@ -588,7 +586,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer,
if (! charsetFlag || ! req_struct->xfrm_flag) {
Uint32 newIndexBuf = indexBuf + attrNoOfWords;
if (newIndexBuf <= maxRead) {
ljam();
jam();
ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor));
MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
&tuple_header[readOffset],
......@@ -596,11 +594,11 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer,
req_struct->out_buf_index = newIndexBuf;
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
}//if
} else {
ljam();
jam();
Tablerec* regTabPtr = tabptr.p;
Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
uchar* dstPtr = (uchar*)&outBuffer[indexBuf];
......@@ -617,7 +615,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer,
Uint32 dstLen = xmul * (srcBytes - lb);
Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
if (maxIndexBuf <= maxRead && ok) {
ljam();
jam();
const char* ssrcPtr = (const char*)srcPtr;
int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
ndbrequire(n != -1);
......@@ -631,7 +629,7 @@ Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer,
req_struct->out_buf_index = newIndexBuf;
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
}
}
......@@ -645,13 +643,13 @@ Dbtup::readDiskFixedSizeNULLable(Uint32* outBuffer,
Uint32 attrDes2)
{
if (!disk_nullFlagCheck(req_struct, attrDes2)) {
ljam();
jam();
return readDiskFixedSizeNotNULL(outBuffer,
req_struct,
ahOut,
attrDes2);
} else {
ljam();
jam();
ahOut->setNULL();
return true;
}
......@@ -680,7 +678,7 @@ Dbtup::readDiskVarSizeNotNULL(Uint32* out_buffer,
ndbrequire(vsize_in_words <= max_var_size);
if (new_index <= max_read) {
ljam();
jam();
ah_out->setByteSize(vsize_in_bytes);
memcpy(out_buffer+index_buf,
req_struct->m_var_data[DD].m_data_ptr+var_attr_pos,
......@@ -688,7 +686,7 @@ Dbtup::readDiskVarSizeNotNULL(Uint32* out_buffer,
req_struct->out_buf_index= new_index;
return true;
} else {
ljam();
jam();
terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}
......@@ -701,13 +699,13 @@ Dbtup::readDiskVarSizeNULLable(Uint32* outBuffer,
Uint32 attrDes2)
{
if (!disk_nullFlagCheck(req_struct, attrDes2)) {
ljam();
jam();
return readDiskVarSizeNotNULL(outBuffer,
req_struct,
ahOut,
attrDes2);
} else {
ljam();
jam();
ahOut->setNULL();
return true;
}
......@@ -749,13 +747,13 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct,
if (checkUpdateOfPrimaryKey(req_struct,
&inBuffer[inBufIndex],
regTabPtr)) {
ljam();
jam();
terrorCode= ZTRY_UPDATE_PRIMARY_KEY;
return -1;
}
}
UpdateFunction f= regTabPtr->updateFunctionArray[attributeId];
ljam();
jam();
req_struct->attr_descriptor= attrDescriptor;
req_struct->changeMask.set(attributeId);
if (attributeId >= 64) {
......@@ -771,13 +769,13 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct,
inBufIndex= req_struct->in_buf_index;
continue;
} else {
ljam();
jam();
return -1;
}
}
else if(attributeId == AttributeHeader::DISK_REF)
{
ljam();
jam();
Uint32 sz= ahIn.getDataSize();
ndbrequire(sz == 2);
req_struct->m_tuple_ptr->m_header_bits |= Tuple_header::DISK_PART;
......@@ -788,7 +786,7 @@ int Dbtup::updateAttributes(KeyReqStruct *req_struct,
}
else
{
ljam();
jam();
terrorCode= ZATTRIBUTE_ID_ERROR;
return -1;
}
......@@ -842,13 +840,13 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct,
ndbrequire(req_struct->out_buf_index == ahOut->getDataSize());
if (ahIn.getDataSize() != ahOut->getDataSize()) {
ljam();
jam();
return true;
}
if (memcmp(&keyReadBuffer[0],
&updateBuffer[1],
req_struct->out_buf_index << 2) != 0) {
ljam();
jam();
return true;
}
return false;
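
checkUpdateOfPrimaryKey() above re-reads the stored key and flags the update when either the length or the bytes differ from the incoming value, which updateAttributes() then rejects with ZTRY_UPDATE_PRIMARY_KEY. The comparison reduced to its essentials:

#include <cstdint>
#include <cstring>

// Returns true when the incoming value would change the primary key,
// i.e. when the update must be rejected.
static bool keyWouldChange(const uint32_t* storedKey, uint32_t storedWords,
                           const uint32_t* newValue,  uint32_t newWords) {
  if (storedWords != newWords)
    return true;                                        // size differs
  return std::memcmp(storedKey, newValue, storedWords * 4) != 0;
}
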
......@@ -871,17 +869,17 @@ Dbtup::updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer,
if (newIndex <= inBufLen) {
Uint32 updateWord= inBuffer[indexBuf + 1];
if (!nullIndicator) {
ljam();
jam();
req_struct->in_buf_index= newIndex;
tuple_header[updateOffset]= updateWord;
return true;
} else {
ljam();
jam();
terrorCode= ZNOT_NULL_ATTR;
return false;
}
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -906,18 +904,18 @@ Dbtup::updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer,
Uint32 updateWord1= inBuffer[indexBuf + 1];
Uint32 updateWord2= inBuffer[indexBuf + 2];
if (!nullIndicator) {
ljam();
jam();
req_struct->in_buf_index= newIndex;
tuple_header[updateOffset]= updateWord1;
tuple_header[updateOffset + 1]= updateWord2;
return true;
} else {
ljam();
jam();
terrorCode= ZNOT_NULL_ATTR;
return false;
}
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -943,9 +941,9 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
if (newIndex <= inBufLen) {
if (!nullIndicator) {
ljam();
jam();
if (charsetFlag) {
ljam();
jam();
Tablerec* regTabPtr = tabptr.p;
Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
......@@ -957,14 +955,14 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
const char* ssrc = (const char*)&inBuffer[indexBuf + 1];
Uint32 lb, len;
if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) {
ljam();
jam();
terrorCode = ZINVALID_CHAR_FORMAT;
return false;
}
// fast fix bug#7340
if (typeId != NDB_TYPE_TEXT &&
(*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, &not_used) != len) {
ljam();
jam();
terrorCode = ZINVALID_CHAR_FORMAT;
return false;
}
......@@ -976,12 +974,12 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
return true;
} else {
ljam();
jam();
terrorCode= ZNOT_NULL_ATTR;
return false;
}
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -999,7 +997,7 @@ Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr);
if (!nullIndicator) {
ljam();
jam();
BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos);
return updateFixedSizeTHManyWordNotNULL(inBuffer,
req_struct,
......@@ -1008,11 +1006,11 @@ Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer,
Uint32 newIndex= req_struct->in_buf_index + 1;
if (newIndex <= req_struct->in_buf_len) {
BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos);
ljam();
jam();
req_struct->in_buf_index= newIndex;
return true;
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -1046,7 +1044,7 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer,
if (new_index <= in_buf_len && vsize_in_words <= max_var_size) {
if (!null_ind) {
ljam();
jam();
var_attr_pos= vpos_array[var_index];
var_data_start= req_struct->m_var_data[MM].m_data_ptr;
vpos_array[var_index+idx]= var_attr_pos+size_in_bytes;
......@@ -1057,12 +1055,12 @@ Dbtup::updateVarSizeNotNULL(Uint32* in_buffer,
size_in_bytes);
return true;
} else {
ljam();
jam();
terrorCode= ZNOT_NULL_ATTR;
return false;
}
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -1082,7 +1080,7 @@ Dbtup::updateVarSizeNULLable(Uint32* inBuffer,
Uint32 idx= req_struct->m_var_data[MM].m_var_len_offset;
if (!nullIndicator) {
ljam();
jam();
BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos);
return updateVarSizeNotNULL(inBuffer,
req_struct,
......@@ -1092,13 +1090,13 @@ Dbtup::updateVarSizeNULLable(Uint32* inBuffer,
Uint32 var_index= AttributeOffset::getOffset(attrDes2);
Uint32 var_pos= req_struct->var_pos_array[var_index];
if (newIndex <= req_struct->in_buf_len) {
ljam();
jam();
BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos);
req_struct->var_pos_array[var_index+idx]= var_pos;
req_struct->in_buf_index= newIndex;
return true;
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -1110,7 +1108,7 @@ Dbtup::updateDynFixedSize(Uint32* inBuffer,
KeyReqStruct *req_struct,
Uint32 attrDes2)
{
ljam();
jam();
terrorCode= ZVAR_SIZED_NOT_SUPPORTED;
return false;
}
......@@ -1120,7 +1118,7 @@ Dbtup::updateDynVarSize(Uint32* inBuffer,
KeyReqStruct *req_struct,
Uint32 attrDes2)
{
ljam();
jam();
terrorCode= ZVAR_SIZED_NOT_SUPPORTED;
return false;
}
......@@ -1218,7 +1216,7 @@ Dbtup::readBitsNotNULL(Uint32* outBuffer,
Uint32 maxRead = req_struct->max_read;
Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr);
if (newIndexBuf <= maxRead) {
ljam();
jam();
ahOut->setDataSize((bitCount + 31) >> 5);
req_struct->out_buf_index = newIndexBuf;
......@@ -1227,7 +1225,7 @@ Dbtup::readBitsNotNULL(Uint32* outBuffer,
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}//if
......@@ -1251,20 +1249,20 @@ Dbtup::readBitsNULLable(Uint32* outBuffer,
if(BitmaskImpl::get(regTabPtr->m_offsets[MM].m_null_words, bits, pos))
{
ljam();
jam();
ahOut->setNULL();
return true;
}
if (newIndexBuf <= maxRead) {
ljam();
jam();
ahOut->setDataSize((bitCount + 31) >> 5);
req_struct->out_buf_index = newIndexBuf;
BitmaskImpl::getField(regTabPtr->m_offsets[MM].m_null_words, bits, pos+1,
bitCount, outBuffer+indexBuf);
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}//if
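
readBitsNotNULL()/readBitsNULLable() show that bit-typed attributes live in the same word array as the null flags: for a NULLable bit field the null indicator sits at position pos and the payload starts at pos + 1, which is why getField is called with pos + 1. A rough sketch of that layout, with plain helpers standing in for BitmaskImpl and little-endian bit order assumed:

#include <cstdint>

static bool getBit(const uint32_t* bits, uint32_t pos) {
  return (bits[pos >> 5] >> (pos & 31)) & 1u;
}

// Extract bitCount bits starting at startPos into dst (assumed bit order).
static void getField(const uint32_t* bits, uint32_t startPos,
                     uint32_t bitCount, uint32_t* dst) {
  for (uint32_t i = 0; i < bitCount; i++) {
    uint32_t b = getBit(bits, startPos + i) ? 1u : 0u;
    if ((i & 31) == 0) dst[i >> 5] = 0;
    dst[i >> 5] |= b << (i & 31);
  }
}

// NULLable bit attribute: bit 'pos' is the null flag, payload follows at pos + 1.
static bool readBits(const uint32_t* nullWords, uint32_t pos,
                     uint32_t bitCount, uint32_t* out, bool& isNull) {
  isNull = getBit(nullWords, pos);
  if (isNull)
    return true;
  getField(nullWords, pos + 1, bitCount, out);
  return true;
}
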
......@@ -1293,12 +1291,12 @@ Dbtup::updateBitsNotNULL(Uint32* inBuffer,
req_struct->in_buf_index = newIndex;
return true;
} else {
ljam();
jam();
terrorCode = ZNOT_NULL_ATTR;
return false;
}//if
} else {
ljam();
jam();
terrorCode = ZAI_INCONSISTENCY_ERROR;
return false;
}//if
......@@ -1331,13 +1329,13 @@ Dbtup::updateBitsNULLable(Uint32* inBuffer,
Uint32 newIndex = indexBuf + 1;
if (newIndex <= req_struct->in_buf_len)
{
ljam();
jam();
BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos);
req_struct->in_buf_index = newIndex;
return true;
} else {
ljam();
jam();
terrorCode = ZAI_INCONSISTENCY_ERROR;
return false;
}//if
......@@ -1364,9 +1362,9 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer,
if (newIndex <= inBufLen) {
if (!nullIndicator) {
ljam();
jam();
if (charsetFlag) {
ljam();
jam();
Tablerec* regTabPtr = tabptr.p;
Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
......@@ -1378,14 +1376,14 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer,
const char* ssrc = (const char*)&inBuffer[indexBuf + 1];
Uint32 lb, len;
if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) {
ljam();
jam();
terrorCode = ZINVALID_CHAR_FORMAT;
return false;
}
// fast fix bug#7340
if (typeId != NDB_TYPE_TEXT &&
(*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, &not_used) != len) {
ljam();
jam();
terrorCode = ZINVALID_CHAR_FORMAT;
return false;
}
......@@ -1396,12 +1394,12 @@ Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer,
noOfWords);
return true;
} else {
ljam();
jam();
terrorCode= ZNOT_NULL_ATTR;
return false;
}
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -1419,7 +1417,7 @@ Dbtup::updateDiskFixedSizeNULLable(Uint32* inBuffer,
Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD);
if (!nullIndicator) {
ljam();
jam();
BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos);
return updateDiskFixedSizeNotNULL(inBuffer,
req_struct,
......@@ -1428,11 +1426,11 @@ Dbtup::updateDiskFixedSizeNULLable(Uint32* inBuffer,
Uint32 newIndex= req_struct->in_buf_index + 1;
if (newIndex <= req_struct->in_buf_len) {
BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos);
ljam();
jam();
req_struct->in_buf_index= newIndex;
return true;
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -1466,7 +1464,7 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer,
if (new_index <= in_buf_len && vsize_in_words <= max_var_size) {
if (!null_ind) {
ljam();
jam();
var_attr_pos= vpos_array[var_index];
var_data_start= req_struct->m_var_data[DD].m_data_ptr;
vpos_array[var_index+idx]= var_attr_pos+size_in_bytes;
......@@ -1477,12 +1475,12 @@ Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer,
size_in_bytes);
return true;
} else {
ljam();
jam();
terrorCode= ZNOT_NULL_ATTR;
return false;
}
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -1502,7 +1500,7 @@ Dbtup::updateDiskVarSizeNULLable(Uint32* inBuffer,
Uint32 idx= req_struct->m_var_data[DD].m_var_len_offset;
if (!nullIndicator) {
ljam();
jam();
BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos);
return updateDiskVarSizeNotNULL(inBuffer,
req_struct,
......@@ -1512,13 +1510,13 @@ Dbtup::updateDiskVarSizeNULLable(Uint32* inBuffer,
Uint32 var_index= AttributeOffset::getOffset(attrDes2);
Uint32 var_pos= req_struct->var_pos_array[var_index];
if (newIndex <= req_struct->in_buf_len) {
ljam();
jam();
BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos);
req_struct->var_pos_array[var_index+idx]= var_pos;
req_struct->in_buf_index= newIndex;
return true;
} else {
ljam();
jam();
terrorCode= ZAI_INCONSISTENCY_ERROR;
return false;
}
......@@ -1540,7 +1538,7 @@ Dbtup::readDiskBitsNotNULL(Uint32* outBuffer,
Uint32 maxRead = req_struct->max_read;
Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD);
if (newIndexBuf <= maxRead) {
ljam();
jam();
ahOut->setDataSize((bitCount + 31) >> 5);
req_struct->out_buf_index = newIndexBuf;
......@@ -1549,7 +1547,7 @@ Dbtup::readDiskBitsNotNULL(Uint32* outBuffer,
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}//if
......@@ -1573,20 +1571,20 @@ Dbtup::readDiskBitsNULLable(Uint32* outBuffer,
if(BitmaskImpl::get(regTabPtr->m_offsets[DD].m_null_words, bits, pos))
{
ljam();
jam();
ahOut->setNULL();
return true;
}
if (newIndexBuf <= maxRead) {
ljam();
jam();
ahOut->setDataSize((bitCount + 31) >> 5);
req_struct->out_buf_index = newIndexBuf;
BitmaskImpl::getField(regTabPtr->m_offsets[DD].m_null_words, bits, pos+1,
bitCount, outBuffer+indexBuf);
return true;
} else {
ljam();
jam();
terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
return false;
}//if
......@@ -1615,12 +1613,12 @@ Dbtup::updateDiskBitsNotNULL(Uint32* inBuffer,
req_struct->in_buf_index = newIndex;
return true;
} else {
ljam();
jam();
terrorCode = ZNOT_NULL_ATTR;
return false;
}//if
} else {
ljam();
jam();
terrorCode = ZAI_INCONSISTENCY_ERROR;
return false;
}//if
......@@ -1653,13 +1651,13 @@ Dbtup::updateDiskBitsNULLable(Uint32* inBuffer,
Uint32 newIndex = indexBuf + 1;
if (newIndex <= req_struct->in_buf_len)
{
ljam();
jam();
BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos);
req_struct->in_buf_index = newIndex;
return true;
} else {
ljam();
jam();
terrorCode = ZAI_INCONSISTENCY_ERROR;
return false;
}//if
......
......@@ -14,6 +14,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_SCAN_CPP
#include "Dbtup.hpp"
#include <signaldata/AccScan.hpp>
#include <signaldata/NextScan.hpp>
......
......@@ -15,14 +15,12 @@
#define DBTUP_C
#define DBTUP_STORE_PROC_DEF_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#define ljam() { jamLine(18000 + __LINE__); }
#define ljamEntry() { jamEntryLine(18000 + __LINE__); }
/* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- */
/* ------------ADD/DROP STORED PROCEDURE MODULE ------------------- */
......@@ -32,7 +30,7 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal)
{
OperationrecPtr regOperPtr;
TablerecPtr regTabPtr;
ljamEntry();
jamEntry();
regOperPtr.i = signal->theData[0];
c_operation_pool.getPtr(regOperPtr);
regTabPtr.i = signal->theData[1];
......@@ -46,17 +44,17 @@ void Dbtup::execSTORED_PROCREQ(Signal* signal)
ndbrequire(regTabPtr.p->tableStatus == DEFINED);
switch (requestInfo) {
case ZSCAN_PROCEDURE:
ljam();
jam();
scanProcedure(signal,
regOperPtr.p,
signal->theData[4]);
break;
case ZCOPY_PROCEDURE:
ljam();
jam();
copyProcedure(signal, regTabPtr, regOperPtr.p);
break;
case ZSTORED_PROCEDURE_DELETE:
ljam();
jam();
deleteScanProcedure(signal, regOperPtr.p);
break;
default:
......@@ -124,14 +122,14 @@ void Dbtup::copyProcedure(Signal* signal,
AttributeHeader::init(&signal->theData[length + 1], Ti, 0);
length++;
if (length == 24) {
ljam();
jam();
ndbrequire(storedProcedureAttrInfo(signal, regOperPtr,
signal->theData+1, length, true));
length = 0;
}//if
}//for
if (length != 0) {
ljam();
jam();
ndbrequire(storedProcedureAttrInfo(signal, regOperPtr,
signal->theData+1, length, true));
}//if
......@@ -155,7 +153,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal,
ndbrequire(regOperPtr->currentAttrinbufLen <= regOperPtr->attrinbufLen);
if ((RnoFree > MIN_ATTRBUF) ||
(copyProcedure)) {
ljam();
jam();
regAttrPtr.i = cfirstfreeAttrbufrec;
ptrCheckGuard(regAttrPtr, cnoOfAttrbufrec, attrbufrec);
regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = 0;
......@@ -163,18 +161,18 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal,
cnoFreeAttrbufrec = RnoFree - 1;
regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL;
} else {
ljam();
jam();
storedSeizeAttrinbufrecErrorLab(signal, regOperPtr);
return false;
}//if
if (regOperPtr->firstAttrinbufrec == RNIL) {
ljam();
jam();
regOperPtr->firstAttrinbufrec = regAttrPtr.i;
}//if
regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL;
if (regOperPtr->lastAttrinbufrec != RNIL) {
AttrbufrecPtr tempAttrinbufptr;
ljam();
jam();
tempAttrinbufptr.i = regOperPtr->lastAttrinbufrec;
ptrCheckGuard(tempAttrinbufptr, cnoOfAttrbufrec, attrbufrec);
tempAttrinbufptr.p->attrbuf[ZBUF_NEXT] = regAttrPtr.i;
......@@ -187,7 +185,7 @@ bool Dbtup::storedProcedureAttrInfo(Signal* signal,
length);
if (regOperPtr->currentAttrinbufLen < regOperPtr->attrinbufLen) {
ljam();
jam();
return true;
}//if
if (ERROR_INSERTED(4005) && !copyProcedure) {
......
......@@ -15,14 +15,12 @@
#define DBTUP_C
#define DBTUP_TAB_DES_MAN_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
#define ljam() { jamLine(22000 + __LINE__); }
#define ljamEntry() { jamEntryLine(22000 + __LINE__); }
/*
* TABLE DESCRIPTOR MEMORY MANAGER
*
......@@ -65,30 +63,30 @@ Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
allocSize = (((allocSize - 1) >> 4) + 1) << 4;
Uint32 list = nextHigherTwoLog(allocSize - 1); /* CALCULATE WHICH LIST IT BELONGS TO */
for (Uint32 i = list; i < 16; i++) {
ljam();
jam();
if (cfreeTdList[i] != RNIL) {
ljam();
jam();
reference = cfreeTdList[i];
removeTdArea(reference, i); /* REMOVE THE AREA FROM THE FREELIST */
Uint32 retNo = (1 << i) - allocSize; /* CALCULATE THE DIFFERENCE */
if (retNo >= ZTD_FREE_SIZE) {
ljam();
jam();
// return unused words, of course without attempting left merge
Uint32 retRef = reference + allocSize;
freeTabDescr(retRef, retNo, false);
} else {
ljam();
jam();
allocSize = 1 << i;
}//if
break;
}//if
}//for
if (reference == RNIL) {
ljam();
jam();
terrorCode = ZMEM_NOTABDESCR_ERROR;
return RNIL;
} else {
ljam();
jam();
setTabDescrWord((reference + allocSize) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL);
setTabDescrWord(reference + ZTD_DATASIZE, allocSize);
......@@ -105,7 +103,7 @@ void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal)
{
itdaMergeTabDescr(retRef, retNo, normal); /* MERGE WITH POSSIBLE NEIGHBOURS */
while (retNo >= ZTD_FREE_SIZE) {
ljam();
jam();
Uint32 list = nextHigherTwoLog(retNo);
list--; /* RETURN TO NEXT LOWER LIST */
Uint32 sizeOfChunk = 1 << list;
......@@ -136,7 +134,7 @@ void Dbtup::insertTdArea(Uint32 tabDesRef, Uint32 list)
setTabDescrWord(tabDesRef + ZTD_FL_HEADER, ZTD_TYPE_FREE);
setTabDescrWord(tabDesRef + ZTD_FL_NEXT, cfreeTdList[list]);
if (cfreeTdList[list] != RNIL) {
ljam(); /* PREVIOUSLY EMPTY SLOT */
jam(); /* PREVIOUSLY EMPTY SLOT */
setTabDescrWord(cfreeTdList[list] + ZTD_FL_PREV, tabDesRef);
}//if
cfreeTdList[list] = tabDesRef; /* RELINK THE LIST */
......@@ -156,28 +154,28 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal)
{
// merge right
while ((retRef + retNo) < cnoOfTabDescrRec) {
ljam();
jam();
Uint32 tabDesRef = retRef + retNo;
Uint32 headerWord = getTabDescrWord(tabDesRef + ZTD_FL_HEADER);
if (headerWord == ZTD_TYPE_FREE) {
ljam();
jam();
Uint32 sizeOfMergedPart = getTabDescrWord(tabDesRef + ZTD_FL_SIZE);
retNo += sizeOfMergedPart;
Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1);
removeTdArea(tabDesRef, list);
} else {
ljam();
jam();
break;
}
}
// merge left
const bool mergeLeft = normal;
while (mergeLeft && retRef > 0) {
ljam();
jam();
Uint32 trailerWord = getTabDescrWord(retRef - ZTD_TR_TYPE);
if (trailerWord == ZTD_TYPE_FREE) {
ljam();
jam();
Uint32 sizeOfMergedPart = getTabDescrWord(retRef - ZTD_TR_SIZE);
ndbrequire(retRef >= sizeOfMergedPart);
retRef -= sizeOfMergedPart;
......@@ -185,7 +183,7 @@ void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal)
Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1);
removeTdArea(retRef, list);
} else {
ljam();
jam();
break;
}
}
......@@ -213,15 +211,15 @@ void Dbtup::removeTdArea(Uint32 tabDesRef, Uint32 list)
setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL);
if (tabDesRef == cfreeTdList[list]) {
ljam();
jam();
cfreeTdList[list] = tabDescrNextPtr; /* RELINK THE LIST */
}//if
if (tabDescrNextPtr != RNIL) {
ljam();
jam();
setTabDescrWord(tabDescrNextPtr + ZTD_FL_PREV, tabDescrPrevPtr);
}//if
if (tabDescrPrevPtr != RNIL) {
ljam();
jam();
setTabDescrWord(tabDescrPrevPtr + ZTD_FL_NEXT, tabDescrNextPtr);
}//if
}//Dbtup::removeTdArea()
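
The table descriptor manager above keeps 16 free lists, one per power-of-two chunk size: allocTabDescr() scans upward from the first list large enough, splits off any remainder of at least ZTD_FREE_SIZE words, and freeTabDescr()/itdaMergeTabDescr() merge a freed area with free neighbours before relisting it. A compact sketch of the bucket search and split step only (merging and the intrusive free-list plumbing omitted, names invented):

#include <cstdint>
#include <vector>

static const uint32_t NIL = 0xFFFFFFFFu;    // plays the role of RNIL
static const uint32_t MIN_SPLIT = 4;        // plays the role of ZTD_FREE_SIZE

// One free list per power-of-two chunk size; each entry is a chunk start offset.
struct FreeLists { std::vector<uint32_t> list[16]; };

// Smallest i with 2^i >= words (roughly what nextHigherTwoLog does on allocation).
static uint32_t ceilLog2(uint32_t words) {
  uint32_t i = 0;
  while ((1u << i) < words) i++;
  return i;
}

// Largest i with 2^i <= words (the list a freed remainder belongs on).
static uint32_t floorLog2(uint32_t words) {
  uint32_t i = 0;
  while ((2u << i) <= words) i++;
  return i;
}

static uint32_t allocDescr(FreeLists& fl, uint32_t words) {
  for (uint32_t i = ceilLog2(words); i < 16; i++) {
    if (fl.list[i].empty()) continue;
    uint32_t ref = fl.list[i].back();       // take a 2^i-word chunk off list i
    fl.list[i].pop_back();
    uint32_t rest = (1u << i) - words;
    if (rest >= MIN_SPLIT)                  // hand the unused tail back
      fl.list[floorLog2(rest)].push_back(ref + words);
    return ref;
  }
  return NIL;                               // no list can satisfy the request
}

Scanning upward from the exact-fit list means a larger free chunk can always serve a smaller request, at the cost of splitting it.
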
......
......@@ -15,6 +15,7 @@
#define DBTUP_C
#define DBTUP_TRIGGER_CPP
#include "Dbtup.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
......@@ -26,9 +27,6 @@
#include <signaldata/CreateTrig.hpp>
#include <signaldata/TuxMaint.hpp>
#define ljam() { jamLine(7000 + __LINE__); }
#define ljamEntry() { jamEntryLine(7000 + __LINE__); }
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* ----------------------- TRIGGER HANDLING ----------------------- */
......@@ -47,17 +45,17 @@ Dbtup::findTriggerList(Tablerec* table,
case TriggerType::SUBSCRIPTION_BEFORE:
switch (tevent) {
case TriggerEvent::TE_INSERT:
ljam();
jam();
if (ttime == TriggerActionTime::TA_DETACHED)
tlist = &table->subscriptionInsertTriggers;
break;
case TriggerEvent::TE_UPDATE:
ljam();
jam();
if (ttime == TriggerActionTime::TA_DETACHED)
tlist = &table->subscriptionUpdateTriggers;
break;
case TriggerEvent::TE_DELETE:
ljam();
jam();
if (ttime == TriggerActionTime::TA_DETACHED)
tlist = &table->subscriptionDeleteTriggers;
break;
......@@ -68,17 +66,17 @@ Dbtup::findTriggerList(Tablerec* table,
case TriggerType::SECONDARY_INDEX:
switch (tevent) {
case TriggerEvent::TE_INSERT:
ljam();
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->afterInsertTriggers;
break;
case TriggerEvent::TE_UPDATE:
ljam();
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->afterUpdateTriggers;
break;
case TriggerEvent::TE_DELETE:
ljam();
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->afterDeleteTriggers;
break;
......@@ -89,7 +87,7 @@ Dbtup::findTriggerList(Tablerec* table,
case TriggerType::ORDERED_INDEX:
switch (tevent) {
case TriggerEvent::TE_CUSTOM:
ljam();
jam();
if (ttime == TriggerActionTime::TA_CUSTOM)
tlist = &table->tuxCustomTriggers;
break;
......@@ -100,7 +98,7 @@ Dbtup::findTriggerList(Tablerec* table,
case TriggerType::READ_ONLY_CONSTRAINT:
switch (tevent) {
case TriggerEvent::TE_UPDATE:
ljam();
jam();
if (ttime == TriggerActionTime::TA_AFTER)
tlist = &table->constraintUpdateTriggers;
break;
......@@ -118,7 +116,7 @@ Dbtup::findTriggerList(Tablerec* table,
void
Dbtup::execCREATE_TRIG_REQ(Signal* signal)
{
ljamEntry();
jamEntry();
BlockReference senderRef = signal->getSendersBlockRef();
const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
const CreateTrigReq* const req = &reqCopy;
......@@ -131,13 +129,13 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal)
if (tabPtr.p->tableStatus != DEFINED )
{
ljam();
jam();
error= CreateTrigRef::InvalidTable;
}
// Create trigger and associate it with the table
else if (createTrigger(tabPtr.p, req))
{
ljam();
jam();
// Send conf
CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
conf->setUserRef(reference());
......@@ -153,7 +151,7 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal)
}
else
{
ljam();
jam();
error= CreateTrigRef::TooManyTriggers;
}
ndbassert(error != CreateTrigRef::NoError);
......@@ -174,7 +172,7 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal)
void
Dbtup::execDROP_TRIG_REQ(Signal* signal)
{
ljamEntry();
jamEntry();
BlockReference senderRef = signal->getSendersBlockRef();
const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr();
const DropTrigReq* const req = &reqCopy;
......@@ -262,7 +260,7 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req)
if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) &&
((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) ||
(tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) {
ljam();
jam();
tptr.p->sendBeforeValues = false;
}
/*
......@@ -270,7 +268,7 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req)
if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) ||
(tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) &&
(tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) {
ljam();
jam();
tptr.p->sendOnlyChangedAttributes = true;
}
*/
......@@ -282,16 +280,16 @@ Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req)
tptr.p->attributeMask.clear();
if (tptr.p->monitorAllAttributes) {
ljam();
jam();
for(Uint32 i = 0; i < table->m_no_of_attributes; i++) {
if (!primaryKey(table, i)) {
ljam();
jam();
tptr.p->attributeMask.set(i);
}
}
} else {
// Set attribute mask
ljam();
jam();
tptr.p->attributeMask = req->getAttributeMask();
}
return true;
......@@ -336,7 +334,7 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender)
Ptr<TupTriggerData> ptr;
for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
ljam();
jam();
if (ptr.p->triggerId == triggerId) {
if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock)
{
......@@ -348,10 +346,10 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender)
*
* Backup doesn't really care about the Ids though.
*/
ljam();
jam();
continue;
}
ljam();
jam();
tlist->release(ptr.i);
return 0;
}
......@@ -379,7 +377,7 @@ Dbtup::checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct,
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->afterInsertTriggers.isEmpty()))) {
ljam();
jam();
fireImmediateTriggers(req_struct,
regTablePtr->afterInsertTriggers,
regOperPtr);
......@@ -397,14 +395,14 @@ Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct,
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->afterUpdateTriggers.isEmpty()))) {
ljam();
jam();
fireImmediateTriggers(req_struct,
regTablePtr->afterUpdateTriggers,
regOperPtr);
}
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->constraintUpdateTriggers.isEmpty()))) {
ljam();
jam();
fireImmediateTriggers(req_struct,
regTablePtr->constraintUpdateTriggers,
regOperPtr);
......@@ -422,7 +420,7 @@ Dbtup::checkImmediateTriggersAfterDelete(KeyReqStruct *req_struct,
if ((regOperPtr->op_struct.primary_replica) &&
(!(regTablePtr->afterDeleteTriggers.isEmpty()))) {
ljam();
jam();
executeTriggers(req_struct,
regTablePtr->afterDeleteTriggers,
regOperPtr);
......@@ -443,7 +441,7 @@ void Dbtup::checkDeferredTriggers(Signal* signal,
Operationrec* const regOperPtr,
Tablerec* const regTablePtr)
{
ljam();
jam();
// NYI
}//Dbtup::checkDeferredTriggers()
#endif
......@@ -479,7 +477,7 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct,
if (save_ptr->m_header_bits & Tuple_header::ALLOC) {
if (save_type == ZDELETE) {
// insert + delete = nothing
ljam();
jam();
return;
goto end;
}
......@@ -495,10 +493,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct,
switch(regOperPtr->op_struct.op_type) {
case(ZINSERT):
ljam();
jam();
if (regTablePtr->subscriptionInsertTriggers.isEmpty()) {
// Table has no active triggers monitoring inserts at commit
ljam();
jam();
goto end;
}
......@@ -508,10 +506,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct,
regOperPtr);
break;
case(ZDELETE):
ljam();
jam();
if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) {
// Table has no active triggers monitoring deletes at commit
ljam();
jam();
goto end;
}
......@@ -522,10 +520,10 @@ void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct,
regOperPtr);
break;
case(ZUPDATE):
ljam();
jam();
if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) {
// Table has no active triggers monitoring updates at commit
ljam();
jam();
goto end;
}
......@@ -553,10 +551,10 @@ Dbtup::fireImmediateTriggers(KeyReqStruct *req_struct,
TriggerPtr trigPtr;
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
ljam();
jam();
if (trigPtr.p->monitorAllAttributes ||
trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) {
ljam();
jam();
executeTrigger(req_struct,
trigPtr.p,
regOperPtr);
......@@ -575,10 +573,10 @@ Dbtup::fireDeferredTriggers(Signal* signal,
TriggerPtr trigPtr;
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
ljam();
jam();
if (trigPtr.p->monitorAllAttributes ||
trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) {
ljam();
jam();
executeTrigger(req_struct,
trigPtr,
regOperPtr);
......@@ -604,12 +602,12 @@ Dbtup::fireDetachedTriggers(KeyReqStruct *req_struct,
ndbrequire(regOperPtr->is_first_operation());
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
ljam();
jam();
if ((trigPtr.p->monitorReplicas ||
regOperPtr->op_struct.primary_replica) &&
(trigPtr.p->monitorAllAttributes ||
trigPtr.p->attributeMask.overlaps(req_struct->changeMask))) {
ljam();
jam();
executeTrigger(req_struct,
trigPtr.p,
regOperPtr);
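
fireDetachedTriggers() above fires a trigger only when it monitors all replicas or this is the primary replica, and it monitors all attributes or its attribute mask overlaps the operation's change mask. The predicate on its own, with std::bitset standing in for the NDB attribute bitmask and an arbitrary width:

#include <bitset>

struct TriggerData {
  bool monitorReplicas;
  bool monitorAllAttributes;
  std::bitset<128> attributeMask;     // 128 is an arbitrary width for the sketch
};

static bool shouldFire(const TriggerData& t, bool primaryReplica,
                       const std::bitset<128>& changeMask) {
  return (t.monitorReplicas || primaryReplica) &&
         (t.monitorAllAttributes || (t.attributeMask & changeMask).any());
}
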
......@@ -625,7 +623,7 @@ void Dbtup::executeTriggers(KeyReqStruct *req_struct,
TriggerPtr trigPtr;
triggerList.first(trigPtr);
while (trigPtr.i != RNIL) {
ljam();
jam();
executeTrigger(req_struct,
trigPtr.p,
regOperPtr);
......@@ -675,7 +673,7 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct,
ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
if (ref == BACKUP) {
ljam();
jam();
/*
In order for the implementation of BACKUP to work even when changing
primaries in the middle of the backup we need to set the trigger on
......@@ -688,9 +686,9 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct,
signal->theData[0] = trigPtr->triggerId;
signal->theData[1] = regFragPtr.p->fragmentId;
EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2);
ljamEntry();
jamEntry();
if (signal->theData[0] == 0) {
ljam();
jam();
return;
}
}
......@@ -704,7 +702,7 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct,
noAfterWords,
beforeBuffer,
noBeforeWords)) {
ljam();
jam();
return;
}
//--------------------------------------------------------------------
......@@ -720,13 +718,13 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct,
switch(trigPtr->triggerType) {
case (TriggerType::SECONDARY_INDEX):
ljam();
jam();
ref = req_struct->TC_ref;
executeDirect = false;
break;
case (TriggerType::SUBSCRIPTION):
case (TriggerType::SUBSCRIPTION_BEFORE):
ljam();
jam();
// Since only backup uses subscription triggers we send to backup directly for now
ref = trigPtr->m_receiverBlock;
executeDirect = true;
......@@ -747,22 +745,22 @@ void Dbtup::executeTrigger(KeyReqStruct *req_struct,
switch(regOperPtr->op_struct.op_type) {
case(ZINSERT):
ljam();
jam();
// Send AttrInfo signals with new attribute values
trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES);
sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref);
break;
case(ZDELETE):
if (trigPtr->sendBeforeValues) {
ljam();
jam();
trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref);
}
break;
case(ZUPDATE):
ljam();
jam();
if (trigPtr->sendBeforeValues) {
ljam();
jam();
trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES);
sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref);
}
......@@ -788,9 +786,9 @@ Uint32 Dbtup::setAttrIds(Bitmask<MAXNROFATTRIBUTESINWORDS>& attributeMask,
{
Uint32 bufIndx = 0;
for (Uint32 i = 0; i < m_no_of_attributesibutes; i++) {
ljam();
jam();
if (attributeMask.get(i)) {
ljam();
jam();
AttributeHeader::init(&inBuffer[bufIndx++], i, 0);
}
}
......@@ -858,7 +856,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
Uint32 numAttrsToRead;
if ((regOperPtr->op_struct.op_type == ZUPDATE) &&
(trigPtr->sendOnlyChangedAttributes)) {
ljam();
jam();
//--------------------------------------------------------------------
// Update that sends only changed information
//--------------------------------------------------------------------
......@@ -870,13 +868,13 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
} else if ((regOperPtr->op_struct.op_type == ZDELETE) &&
(!trigPtr->sendBeforeValues)) {
ljam();
jam();
//--------------------------------------------------------------------
// Delete without sending before values only read Primary Key
//--------------------------------------------------------------------
return true;
} else {
ljam();
jam();
//--------------------------------------------------------------------
// All others send all attributes that are monitored, except:
// Omit unchanged blob inlines on update i.e.
......@@ -898,7 +896,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
//--------------------------------------------------------------------
if (regOperPtr->op_struct.op_type != ZDELETE)
{
ljam();
jam();
int ret = readAttributes(req_struct,
&readBuffer[0],
numAttrsToRead,
......@@ -908,7 +906,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
ndbrequire(ret != -1);
noAfterWords= ret;
} else {
ljam();
jam();
noAfterWords = 0;
}
......@@ -920,7 +918,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
if ((regOperPtr->op_struct.op_type == ZUPDATE ||
regOperPtr->op_struct.op_type == ZDELETE) &&
(trigPtr->sendBeforeValues)) {
ljam();
jam();
Tuple_header *save= req_struct->m_tuple_ptr;
PagePtr tmp;
......@@ -956,7 +954,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
// Although a trigger was fired it was not necessary since the old
// value and the new value was exactly the same
//--------------------------------------------------------------------
ljam();
jam();
//XXX does this work with collations?
return false;
}
......@@ -976,21 +974,21 @@ void Dbtup::sendTrigAttrInfo(Signal* signal,
do {
sigLen = dataLen - dataIndex;
if (sigLen > TrigAttrInfo::DataLength) {
ljam();
jam();
sigLen = TrigAttrInfo::DataLength;
}
MEMCOPY_NO_WORDS(trigAttrInfo->getData(),
data + dataIndex,
sigLen);
if (executeDirect) {
ljam();
jam();
EXECUTE_DIRECT(receiverReference,
GSN_TRIG_ATTRINFO,
signal,
TrigAttrInfo::StaticLength + sigLen);
ljamEntry();
jamEntry();
} else {
ljam();
jam();
sendSignal(receiverReference,
GSN_TRIG_ATTRINFO,
signal,
......@@ -1018,15 +1016,15 @@ void Dbtup::sendFireTrigOrd(Signal* signal,
switch(regOperPtr->op_struct.op_type) {
case(ZINSERT):
ljam();
jam();
fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT);
break;
case(ZDELETE):
ljam();
jam();
fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE);
break;
case(ZUPDATE):
ljam();
jam();
fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE);
break;
default:
......@@ -1040,12 +1038,12 @@ void Dbtup::sendFireTrigOrd(Signal* signal,
switch(trigPtr->triggerType) {
case (TriggerType::SECONDARY_INDEX):
ljam();
jam();
sendSignal(req_struct->TC_ref, GSN_FIRE_TRIG_ORD,
signal, FireTrigOrd::SignalLength, JBB);
break;
case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma
ljam();
jam();
// Since only backup uses subscription triggers we
// send to backup directly for now
fireTrigOrd->setGCI(req_struct->gci);
......@@ -1056,7 +1054,7 @@ void Dbtup::sendFireTrigOrd(Signal* signal,
FireTrigOrd::SignalWithHashValueLength);
break;
case (TriggerType::SUBSCRIPTION):
ljam();
jam();
// Since only backup uses subscription triggers we
// send to backup directly for now
fireTrigOrd->setGCI(req_struct->gci);
......@@ -1123,7 +1121,7 @@ Dbtup::addTuxEntries(Signal* signal,
Tablerec* regTabPtr)
{
if (ERROR_INSERTED(4022)) {
ljam();
jam();
CLEAR_ERROR_INSERT_VALUE;
terrorCode = 9999;
return -1;
......@@ -1134,12 +1132,12 @@ Dbtup::addTuxEntries(Signal* signal,
Uint32 failPtrI;
triggerList.first(triggerPtr);
while (triggerPtr.i != RNIL) {
ljam();
jam();
req->indexId = triggerPtr.p->indexId;
req->errorCode = RNIL;
if (ERROR_INSERTED(4023) &&
! triggerList.hasNext(triggerPtr)) {
ljam();
jam();
CLEAR_ERROR_INSERT_VALUE;
terrorCode = 9999;
failPtrI = triggerPtr.i;
......@@ -1147,9 +1145,9 @@ Dbtup::addTuxEntries(Signal* signal,
}
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
ljamEntry();
jamEntry();
if (req->errorCode != 0) {
ljam();
jam();
terrorCode = req->errorCode;
failPtrI = triggerPtr.i;
goto fail;
......@@ -1161,12 +1159,12 @@ Dbtup::addTuxEntries(Signal* signal,
req->opInfo = TuxMaintReq::OpRemove;
triggerList.first(triggerPtr);
while (triggerPtr.i != failPtrI) {
ljam();
jam();
req->indexId = triggerPtr.p->indexId;
req->errorCode = RNIL;
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
ljamEntry();
jamEntry();
ndbrequire(req->errorCode == 0);
triggerList.next(triggerPtr);
}
......@@ -1197,15 +1195,15 @@ Dbtup::executeTuxCommitTriggers(Signal* signal,
if (regOperPtr->op_struct.op_type == ZINSERT) {
if (! regOperPtr->op_struct.delete_insert_flag)
return;
ljam();
jam();
tupVersion= decr_tup_version(regOperPtr->tupVersion);
} else if (regOperPtr->op_struct.op_type == ZUPDATE) {
ljam();
jam();
tupVersion= decr_tup_version(regOperPtr->tupVersion);
} else if (regOperPtr->op_struct.op_type == ZDELETE) {
if (regOperPtr->op_struct.delete_insert_flag)
return;
ljam();
jam();
tupVersion= regOperPtr->tupVersion;
} else {
ndbrequire(false);
......@@ -1231,13 +1229,13 @@ Dbtup::executeTuxAbortTriggers(Signal* signal,
// get version
Uint32 tupVersion;
if (regOperPtr->op_struct.op_type == ZINSERT) {
ljam();
jam();
tupVersion = regOperPtr->tupVersion;
} else if (regOperPtr->op_struct.op_type == ZUPDATE) {
ljam();
jam();
tupVersion = regOperPtr->tupVersion;
} else if (regOperPtr->op_struct.op_type == ZDELETE) {
ljam();
jam();
return;
} else {
ndbrequire(false);
......@@ -1262,12 +1260,12 @@ Dbtup::removeTuxEntries(Signal* signal,
TriggerPtr triggerPtr;
triggerList.first(triggerPtr);
while (triggerPtr.i != RNIL) {
ljam();
jam();
req->indexId = triggerPtr.p->indexId;
req->errorCode = RNIL,
EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
signal, TuxMaintReq::SignalLength);
ljamEntry();
jamEntry();
// must succeed
ndbrequire(req->errorCode == 0);
triggerList.next(triggerPtr);
......
......@@ -14,12 +14,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#define DBTUP_C
#define DBTUP_VAR_ALLOC_CPP
#include "Dbtup.hpp"
#define ljam() { jamLine(32000 + __LINE__); }
#define ljamEntry() { jamEntryLine(32000 + __LINE__); }
void Dbtup::init_list_sizes(void)
{
c_min_list_size[0]= 200;
......@@ -109,9 +106,9 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr,
PagePtr pagePtr;
pagePtr.i= get_alloc_page(fragPtr, (alloc_size + 1));
if (pagePtr.i == RNIL) {
ljam();
jam();
if ((pagePtr.i= get_empty_var_page(fragPtr)) == RNIL) {
ljam();
jam();
return 0;
}
c_page_pool.getPtr(pagePtr);
......@@ -127,7 +124,7 @@ Dbtup::alloc_var_part(Fragrecord* fragPtr,
pagePtr.p->page_state = ZTH_MM_FREE;
} else {
c_page_pool.getPtr(pagePtr);
ljam();
jam();
}
Uint32 idx= ((Var_page*)pagePtr.p)
->alloc_record(alloc_size, (Var_page*)ctemp_page, Var_page::CHAIN);
......@@ -178,7 +175,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr,
ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS);
if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1)
{
ljam();
jam();
/*
This code could be used when we release pages.
remove_free_page(signal,fragPtr,page_header,page_header->list_index);
......@@ -186,7 +183,7 @@ void Dbtup::free_var_rec(Fragrecord* fragPtr,
*/
update_free_page_list(fragPtr, pagePtr);
} else {
ljam();
jam();
update_free_page_list(fragPtr, pagePtr);
}
return;
......@@ -260,16 +257,16 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size)
start_index= calculate_free_list_impl(alloc_size);
if (start_index == (MAX_FREE_LIST - 1)) {
ljam();
jam();
} else {
ljam();
jam();
ndbrequire(start_index < (MAX_FREE_LIST - 1));
start_index++;
}
for (i= start_index; i < MAX_FREE_LIST; i++) {
ljam();
jam();
if (!fragPtr->free_var_page_array[i].isEmpty()) {
ljam();
jam();
return fragPtr->free_var_page_array[i].firstItem;
}
}
......@@ -278,9 +275,9 @@ Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size)
LocalDLList<Page> list(c_page_pool, fragPtr->free_var_page_array[i]);
for(list.first(pagePtr); !pagePtr.isNull() && loop < 16; )
{
ljam();
jam();
if (pagePtr.p->free_space >= alloc_size) {
ljam();
jam();
return pagePtr.i;
}
loop++;
......@@ -347,7 +344,7 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr,
(free_space > c_max_list_size[list_index])) {
Uint32 new_list_index= calculate_free_list_impl(free_space);
if (list_index != MAX_FREE_LIST) {
ljam();
jam();
/*
* Only remove it from its list if it is in a list
*/
......@@ -362,11 +359,11 @@ void Dbtup::update_free_page_list(Fragrecord* fragPtr,
This can only happen for the free list with least guaranteed
free space.
*/
ljam();
jam();
ndbrequire(new_list_index == 0);
pagePtr.p->list_index= MAX_FREE_LIST;
} else {
ljam();
jam();
LocalDLList<Page> list(c_page_pool,
fragPtr->free_var_page_array[new_list_index]);
list.add(pagePtr);
......@@ -382,9 +379,9 @@ Uint32 Dbtup::calculate_free_list_impl(Uint32 free_space_size) const
{
Uint32 i;
for (i = 0; i < MAX_FREE_LIST; i++) {
ljam();
jam();
if (free_space_size <= c_max_list_size[i]) {
ljam();
jam();
return i;
}
}
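
calculate_free_list_impl() and update_free_page_list() above keep variable-size pages bucketed by free space: a page sits on the first list whose c_max_list_size bound covers its free space and is relisted only when that free space leaves the current list's [min, max] window. The bucket selection on its own, with an invented bucket count and thresholds:

#include <cstdint>

static const uint32_t NUM_FREE_LISTS = 4;                // bucket count chosen for the sketch
static const uint32_t maxListSize[NUM_FREE_LISTS] = {    // invented thresholds, in words
  255, 1023, 4095, 0xFFFFFFFFu
};

// First list whose upper bound covers the page's current free space
// (the role calculate_free_list_impl plays above).
static uint32_t calcFreeList(uint32_t freeSpace) {
  for (uint32_t i = 0; i < NUM_FREE_LISTS; i++)
    if (freeSpace <= maxListSize[i])
      return i;
  return NUM_FREE_LISTS - 1;                             // unreachable: last bound is a sentinel
}

// A page migrates lists only when its free space leaves the current list's window.
static bool needsRelisting(uint32_t freeSpace, uint32_t minBound, uint32_t maxBound) {
  return freeSpace < minBound || freeSpace > maxBound;
}
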
......