Commit 4c7d6191 authored by unknown

WL#2868 Fix backup trigger handling

BACKUP previously set up triggers via DICT.
This led to all kinds of trouble.
A smaller alternative to using SUMA for backup
  is to have BACKUP manage triggers in TUP directly.

This way all triggers are completely local,
  and error handling is much simpler.

--- old impl.

Start: Master receives GSN_DEFINE_BACKUP_CONF from all participants
Master sends CREATE_TRIG_REQ for all tables to local DICT (DICT master)
Master sends START_BACKUP_REQ to all participants with trigger ids from DICT
Master sends ALTER_TRIG_REQ (online) to local DICT
Master waits for GCP
Master starts distributed scan
When the scan has finished
Master waits for GCP
Master sends DROP_TRIGGER to local DICT
Master sends STOP_BACKUP_REQ to all participants

--- new impl.

Start: Master receives GSN_DEFINE_BACKUP_CONF from all participants
Master sends START_BACKUP_REQ to all participants
  Participant sends CREATE_TRIG_REQ for all tables to local TUP
Master waits for GCP
Master starts distributed scan
When the scan has finished
Master waits for GCP
Master sends STOP_BACKUP_REQ to all participants
  Participant sends DROP_TRIGGER to local TUP
  (a small standalone sketch of this participant-side lifecycle follows below)
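
The participant side of the new flow can be pictured with a minimal standalone
C++ model. All type and function names below are illustrative only, not the
real Backup/Dbtup block interfaces in this commit: on START_BACKUP_REQ each
node seizes trigger records locally, uses the record index as the trigger id,
creates one trigger per event (insert/update/delete) and table in its own TUP,
and on STOP_BACKUP_REQ drops exactly the triggers it created.

// Minimal sketch of participant-local backup trigger handling (assumed names).
#include <cstdio>
#include <map>
#include <vector>

enum Event { TE_INSERT = 0, TE_UPDATE = 1, TE_DELETE = 2 };

struct LocalTup {                      // stands in for the node-local DBTUP
  std::map<unsigned, Event> triggers;  // triggerId -> event
  bool createTrigger(unsigned id, Event e) { triggers[id] = e; return true; }
  bool dropTrigger(unsigned id)            { return triggers.erase(id) == 1; }
};

struct Participant {
  LocalTup tup;                        // each node talks only to its own TUP
  std::vector<unsigned> ownedTriggers; // ids allocated locally (like trigPtr.i)
  unsigned nextId = 0;

  // START_BACKUP_REQ: create three local triggers per table, no DICT round trip
  bool startBackup(const std::vector<unsigned>& tableIds) {
    for (unsigned table : tableIds) {
      for (int e = TE_INSERT; e <= TE_DELETE; e++) {
        unsigned id = nextId++;
        if (!tup.createTrigger(id, (Event)e)) return false; // -> START_BACKUP_REF
        ownedTriggers.push_back(id);
        std::printf("table %u: created local trigger %u for event %d\n",
                    table, id, e);
      }
    }
    return true;                                            // -> START_BACKUP_CONF
  }

  // STOP_BACKUP_REQ: drop only the triggers this participant created
  void stopBackup() {
    for (unsigned id : ownedTriggers) tup.dropTrigger(id);
    ownedTriggers.clear();
  }
};

int main() {
  Participant p;
  p.startBackup({1, 2});
  p.stopBackup();
  std::printf("remaining local triggers: %zu\n", p.tup.triggers.size());
  return 0;
}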

Changes:
All trigger handling is _local_.
 This implies that abort (e.g. due to node failure) can be _local_.


Fix the testBackup test so that it runs successfully with the (now correct)
backup trigger code.


storage/ndb/include/kernel/signaldata/BackupImpl.hpp:
  rework START_BACKUP signals as we no longer need tableId and triggerIds.
storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp:
  START_BACKUP_REQ no longer has tableIds and trigger ids
storage/ndb/src/kernel/blocks/backup/Backup.cpp:
  Use TUP triggers directly.
  
  removes ALTER trigger
  simplifies DROP triggers
  
  changes to node failure handling
  
  changes in signal order
  
  use SlaveData to track slave status.
storage/ndb/src/kernel/blocks/backup/Backup.hpp:
  - remove ALTER_TRIG (now unused)
  - add signalNo to BackupRecord
  - add SlaveData
  - remove dead items from MasterData
  - update prototype of startBackupReply
storage/ndb/src/kernel/blocks/backup/Backup.txt:
  Update signals for new backup code.
storage/ndb/src/kernel/blocks/backup/BackupInit.cpp:
  remove ALTER_TRIG REF and CONF as we no longer use them.
storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp:
  Add comment about meaning of triggerId
  Add sender BlockNumber parameter to dropTrigger.
storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp:
  For SUBSCRIPTION triggers, make the trigger ids private to each block
  (see the sketch after this file list).
storage/ndb/test/ndbapi/testBackup.cpp:
  Don't do an initial restart, just a normal restart. This avoids cache issues
  with schema versions.
storage/ndb/test/src/NdbBackup.cpp:
  Update error insertions.
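
The DbtupTrigger.cpp change is easiest to see in a small standalone model
(made-up names and block numbers, not the real Dbtup interface): dropTrigger()
now also receives the sender's block number, and for SUBSCRIPTION triggers a
drop only matches triggers created by that same block. BACKUP and SUMA can
therefore use overlapping, block-private trigger ids without removing each
other's triggers.

// Sketch of block-private SUBSCRIPTION trigger ids (assumed names/numbers).
#include <cassert>
#include <list>

enum TriggerType { SUBSCRIPTION, SECONDARY_INDEX };

struct TupTrigger {
  unsigned    triggerId;
  TriggerType type;
  unsigned    receiverBlock;   // block that created (and receives) the trigger
};

struct Table {
  std::list<TupTrigger> triggers;

  void createTrigger(unsigned id, TriggerType t, unsigned block) {
    triggers.push_back({id, t, block});
  }

  // returns true if a matching trigger was dropped
  bool dropTrigger(unsigned id, TriggerType t, unsigned senderBlock) {
    for (auto it = triggers.begin(); it != triggers.end(); ++it) {
      if (it->triggerId != id || it->type != t) continue;
      // subscription trigger ids are private: only the creator may drop it
      if (t == SUBSCRIPTION && senderBlock != it->receiverBlock) continue;
      triggers.erase(it);
      return true;
    }
    return false;
  }
};

int main() {
  const unsigned BACKUP_BLOCK = 244, SUMA_BLOCK = 247;  // illustrative numbers
  Table tab;
  tab.createTrigger(0, SUBSCRIPTION, SUMA_BLOCK);    // SUMA's trigger, id 0
  tab.createTrigger(0, SUBSCRIPTION, BACKUP_BLOCK);  // BACKUP's trigger, same id
  // BACKUP dropping id 0 must not remove SUMA's trigger
  assert(tab.dropTrigger(0, SUBSCRIPTION, BACKUP_BLOCK));
  assert(tab.triggers.size() == 1);
  assert(tab.triggers.front().receiverBlock == SUMA_BLOCK);
  return 0;
}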
parent 7dbb63fe
......@@ -139,21 +139,11 @@ class StartBackupReq {
friend bool printSTART_BACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( MaxTableTriggers = 4 );
STATIC_CONST( HeaderLength = 5 );
STATIC_CONST( TableTriggerLength = 4);
STATIC_CONST( SignalLength = 2 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
Uint32 noOfSignals;
Uint32 noOfTableTriggers;
struct TableTriggers {
Uint32 tableId;
Uint32 triggerIds[3];
} tableTriggers[MaxTableTriggers];
};
class StartBackupRef {
......@@ -169,7 +159,7 @@ class StartBackupRef {
friend bool printSTART_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 5 );
STATIC_CONST( SignalLength = 4 );
enum ErrorCode {
FailedToAllocateTriggerRecord = 1
......@@ -177,7 +167,6 @@ public:
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
Uint32 errorCode;
Uint32 nodeId;
};
......@@ -195,12 +184,11 @@ class StartBackupConf {
friend bool printSTART_BACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 3 );
STATIC_CONST( SignalLength = 2 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
};
class BackupFragmentReq {
......
......@@ -48,16 +48,8 @@ printDEFINE_BACKUP_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
bool
printSTART_BACKUP_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
StartBackupReq* sig = (StartBackupReq*)data;
fprintf(out, " backupPtr: %d backupId: %d signalNo: %d of %d\n",
sig->backupPtr, sig->backupId,
sig->signalNo + 1, sig->noOfSignals);
for(Uint32 i = 0; i<sig->noOfTableTriggers; i++)
fprintf(out,
" Table: %d Triggers = [ insert: %d update: %d delete: %d ]\n",
sig->tableTriggers[i].tableId,
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_INSERT],
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_UPDATE],
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_DELETE]);
fprintf(out, " backupPtr: %d backupId: %d\n",
sig->backupPtr, sig->backupId);
return true;
}
......
......@@ -501,12 +501,6 @@ const TriggerEvent::Value triggerEventValues[] = {
TriggerEvent::TE_DELETE
};
const char* triggerNameFormat[] = {
"NDB$BACKUP_%d_%d_INSERT",
"NDB$BACKUP_%d_%d_UPDATE",
"NDB$BACKUP_%d_%d_DELETE"
};
const Backup::State
Backup::validSlaveTransitions[] = {
INITIAL, DEFINING,
......@@ -776,7 +770,6 @@ Backup::checkNodeFail(Signal* signal,
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail;
ref->signalNo = ptr.p->masterData.startBackup.signalNo;
gsn= GSN_START_BACKUP_REF;
len= StartBackupRef::SignalLength;
pos= &ref->nodeId - signal->getDataPtr();
......@@ -928,8 +921,6 @@ Backup::execBACKUP_REQ(Signal* signal)
ptr.p->backupKey[1] = 0;
ptr.p->backupDataLen = 0;
ptr.p->masterData.errorCode = 0;
ptr.p->masterData.dropTrig.tableId = RNIL;
ptr.p->masterData.alterTrig.tableId = RNIL;
UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend();
......@@ -1243,11 +1234,16 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB);
/**
* Prepare Trig
* We've received GSN_DEFINE_BACKUP_CONF from all participants.
*
* Our next step is to send START_BACKUP_REQ to all participants,
* who will then send CREATE_TRIG_REQ for all tables to their local
* DBTUP.
*/
TablePtr tabPtr;
ndbrequire(ptr.p->tables.first(tabPtr));
sendCreateTrig(signal, ptr, tabPtr);
ptr.p->tables.first(tabPtr);
sendStartBackup(signal, ptr, tabPtr);
}
/*****************************************************************************
......@@ -1276,11 +1272,51 @@ Backup::sendCreateTrig(Signal* signal,
{
CreateTrigReq * req =(CreateTrigReq *)signal->getDataPtrSend();
ptr.p->masterData.gsn = GSN_CREATE_TRIG_REQ;
ptr.p->masterData.sendCounter = 3;
ptr.p->masterData.createTrig.tableId = tabPtr.p->tableId;
/*
* First, setup the structures
*/
for(Uint32 j=0; j<3; j++) {
jam();
TriggerPtr trigPtr;
if(!ptr.p->triggers.seize(trigPtr)) {
jam();
ptr.p->m_gsn = GSN_START_BACKUP_REF;
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend();
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord;
ref->nodeId = getOwnNodeId();
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal,
StartBackupRef::SignalLength, JBB);
return;
} // if
const Uint32 triggerId= trigPtr.i;
tabPtr.p->triggerIds[j] = triggerId;
tabPtr.p->triggerAllocated[j] = true;
trigPtr.p->backupPtr = ptr.i;
trigPtr.p->tableId = tabPtr.p->tableId;
trigPtr.p->tab_ptr_i = tabPtr.i;
trigPtr.p->logEntry = 0;
trigPtr.p->event = j;
trigPtr.p->maxRecordSize = 2048;
trigPtr.p->operation =
&ptr.p->files.getPtr(ptr.p->logFilePtr)->operation;
trigPtr.p->operation->noOfBytes = 0;
trigPtr.p->operation->noOfRecords = 0;
trigPtr.p->errorCode = 0;
} // for
/*
* now ask DBTUP to create
*/
ptr.p->slaveData.gsn = GSN_CREATE_TRIG_REQ;
ptr.p->slaveData.trigSendCounter = 3;
ptr.p->slaveData.createTrig.tableId = tabPtr.p->tableId;
req->setUserRef(reference());
req->setReceiverRef(reference());
req->setConnectionPtr(ptr.i);
req->setRequestType(CreateTrigReq::RT_USER);
......@@ -1289,29 +1325,18 @@ Backup::sendCreateTrig(Signal* signal,
req->setAttributeMask(attrMask);
req->setTableId(tabPtr.p->tableId);
req->setIndexId(RNIL); // not used
req->setTriggerId(RNIL); // to be created
req->setTriggerType(TriggerType::SUBSCRIPTION);
req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
req->setMonitorReplicas(true);
req->setMonitorAllAttributes(false);
req->setOnline(false); // leave trigger offline
char triggerName[MAX_TAB_NAME_SIZE];
Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2);
LinearSectionPtr lsPtr[3];
req->setOnline(true);
for (int i=0; i < 3; i++) {
req->setTriggerId(tabPtr.p->triggerIds[i]);
req->setTriggerEvent(triggerEventValues[i]);
req->setReportAllMonitoredAttributes(false);
BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
ptr.p->backupId, tabPtr.p->tableId);
w.reset();
w.add(CreateTrigReq::TriggerNameKey, triggerName);
lsPtr[0].p = nameBuffer;
lsPtr[0].sz = w.getWordsUsed();
sendSignal(DBDICT_REF, GSN_CREATE_TRIG_REQ,
signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ,
signal, CreateTrigReq::SignalLength, JBB);
}
}
......@@ -1331,25 +1356,25 @@ Backup::execCREATE_TRIG_CONF(Signal* signal)
/**
* Verify that I'm waiting for this conf
*
* ptr.p->masterRef != reference()
* as slaves and masters have triggers now.
*/
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ndbrequire(ptr.p->masterData.createTrig.tableId == tableId);
ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false);
ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId);
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, tableId));
ndbrequire(type < 3); // if some decides to change the enums
ndbrequire(tabPtr.p->triggerIds[type] == ILLEGAL_TRIGGER_ID);
tabPtr.p->triggerIds[type] = triggerId;
createTrigReply(signal, ptr);
}
void
Backup::execCREATE_TRIG_REF(Signal* signal)
{
jamEntry();
CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtr();
const Uint32 ptrI = ref->getConnectionPtr();
......@@ -1360,11 +1385,13 @@ Backup::execCREATE_TRIG_REF(Signal* signal)
/**
* Verify that I'm waiting for this ref
*
* ptr.p->masterRef != reference()
* as slaves and masters have triggers now
*/
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ndbrequire(ptr.p->masterData.createTrig.tableId == tableId);
ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false);
ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId);
ptr.p->setErrorCode(ref->getErrorCode());
......@@ -1379,8 +1406,8 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr)
/**
* Check finished with table
*/
ptr.p->masterData.sendCounter--;
if(ptr.p->masterData.sendCounter.done() == false){
ptr.p->slaveData.trigSendCounter--;
if(ptr.p->slaveData.trigSendCounter.done() == false){
jam();
return;
}//if
......@@ -1392,12 +1419,19 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr)
if(ptr.p->checkError()) {
jam();
masterAbort(signal, ptr);
ptr.p->m_gsn = GSN_START_BACKUP_REF;
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend();
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->errorCode = ptr.p->errorCode;
ref->nodeId = getOwnNodeId();
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal,
StartBackupRef::SignalLength, JBB);
return;
}//if
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.createTrig.tableId));
ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.createTrig.tableId));
/**
* Next table
......@@ -1410,14 +1444,16 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr)
}//if
/**
* Finished with all tables, send StartBackupReq
* We've finished creating triggers.
*
* send conf and wait
*/
ptr.p->tables.first(tabPtr);
ptr.p->masterData.startBackup.signalNo = 0;
ptr.p->masterData.startBackup.noOfSignals =
(ptr.p->tables.noOfElements() + StartBackupReq::MaxTableTriggers - 1) /
StartBackupReq::MaxTableTriggers;
sendStartBackup(signal, ptr, tabPtr);
ptr.p->m_gsn = GSN_START_BACKUP_CONF;
StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend();
conf->backupPtr = ptr.i;
conf->backupId = ptr.p->backupId;
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal,
StartBackupConf::SignalLength, JBB);
}
/*****************************************************************************
......@@ -1434,29 +1470,19 @@ Backup::sendStartBackup(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr)
StartBackupReq* req = (StartBackupReq*)signal->getDataPtrSend();
req->backupId = ptr.p->backupId;
req->backupPtr = ptr.i;
req->signalNo = ptr.p->masterData.startBackup.signalNo;
req->noOfSignals = ptr.p->masterData.startBackup.noOfSignals;
Uint32 i;
for(i = 0; i<StartBackupReq::MaxTableTriggers; i++) {
jam();
req->tableTriggers[i].tableId = tabPtr.p->tableId;
req->tableTriggers[i].triggerIds[0] = tabPtr.p->triggerIds[0];
req->tableTriggers[i].triggerIds[1] = tabPtr.p->triggerIds[1];
req->tableTriggers[i].triggerIds[2] = tabPtr.p->triggerIds[2];
if(!ptr.p->tables.next(tabPtr)){
jam();
i++;
break;
}//if
}//for
req->noOfTableTriggers = i;
/**
* We use trigger Ids that are unique to BACKUP.
* These don't interfere with other triggers (e.g. from DBDICT)
* as there is a special case in DBTUP.
*
* Consequently, backups during online upgrade won't work
*/
ptr.p->masterData.gsn = GSN_START_BACKUP_REQ;
ptr.p->masterData.sendCounter = ptr.p->nodes;
NodeReceiverGroup rg(BACKUP, ptr.p->nodes);
sendSignal(rg, GSN_START_BACKUP_REQ, signal,
StartBackupReq::HeaderLength +
(i * StartBackupReq::TableTriggerLength), JBB);
StartBackupReq::SignalLength, JBB);
}
void
......@@ -1467,14 +1493,13 @@ Backup::execSTART_BACKUP_REF(Signal* signal)
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtr();
const Uint32 ptrI = ref->backupPtr;
//const Uint32 backupId = ref->backupId;
const Uint32 signalNo = ref->signalNo;
const Uint32 nodeId = ref->nodeId;
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
ptr.p->setErrorCode(ref->errorCode);
startBackupReply(signal, ptr, nodeId, signalNo);
startBackupReply(signal, ptr, nodeId);
}
void
......@@ -1485,23 +1510,20 @@ Backup::execSTART_BACKUP_CONF(Signal* signal)
StartBackupConf* conf = (StartBackupConf*)signal->getDataPtr();
const Uint32 ptrI = conf->backupPtr;
//const Uint32 backupId = conf->backupId;
const Uint32 signalNo = conf->signalNo;
const Uint32 nodeId = refToNode(signal->senderBlockRef());
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
startBackupReply(signal, ptr, nodeId, signalNo);
startBackupReply(signal, ptr, nodeId);
}
void
Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr,
Uint32 nodeId, Uint32 signalNo)
Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
{
CRASH_INSERTION((10004));
ndbrequire(ptr.p->masterData.startBackup.signalNo == signalNo);
if (!haveAllSignals(ptr, GSN_START_BACKUP_REQ, nodeId)) {
jam();
return;
......@@ -1518,80 +1540,8 @@ Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr,
return;
}
TablePtr tabPtr;
c_tablePool.getPtr(tabPtr, ptr.p->masterData.startBackup.tablePtr);
for(Uint32 i = 0; i<StartBackupReq::MaxTableTriggers; i++) {
jam();
if(!ptr.p->tables.next(tabPtr)) {
jam();
break;
}//if
}//for
if(tabPtr.i != RNIL) {
jam();
ptr.p->masterData.startBackup.signalNo++;
sendStartBackup(signal, ptr, tabPtr);
return;
}
sendAlterTrig(signal, ptr);
}
/*****************************************************************************
*
* Master functionallity - Activate triggers
*
*****************************************************************************/
void
Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr)
{
AlterTrigReq * req =(AlterTrigReq *)signal->getDataPtrSend();
ptr.p->masterData.gsn = GSN_ALTER_TRIG_REQ;
ptr.p->masterData.sendCounter = 0;
req->setUserRef(reference());
req->setConnectionPtr(ptr.i);
req->setRequestType(AlterTrigReq::RT_USER);
req->setTriggerInfo(0); // not used on ALTER via DICT
req->setOnline(true);
req->setReceiverRef(reference());
TablePtr tabPtr;
if (ptr.p->masterData.alterTrig.tableId == RNIL) {
jam();
ptr.p->tables.first(tabPtr);
} else {
jam();
ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.alterTrig.tableId));
ptr.p->tables.next(tabPtr);
}//if
if (tabPtr.i != RNIL) {
jam();
ptr.p->masterData.alterTrig.tableId = tabPtr.p->tableId;
req->setTableId(tabPtr.p->tableId);
req->setTriggerId(tabPtr.p->triggerIds[0]);
sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
signal, AlterTrigReq::SignalLength, JBB);
req->setTriggerId(tabPtr.p->triggerIds[1]);
sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
signal, AlterTrigReq::SignalLength, JBB);
req->setTriggerId(tabPtr.p->triggerIds[2]);
sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
signal, AlterTrigReq::SignalLength, JBB);
ptr.p->masterData.sendCounter += 3;
return;
}//if
ptr.p->masterData.alterTrig.tableId = RNIL;
/**
* Finished with all tables
* Wait for GCP
*/
ptr.p->masterData.gsn = GSN_WAIT_GCP_REQ;
ptr.p->masterData.waitGCP.startBackup = true;
......@@ -1604,62 +1554,6 @@ Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr)
WaitGCPReq::SignalLength,JBB);
}
void
Backup::execALTER_TRIG_CONF(Signal* signal)
{
jamEntry();
AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtr();
const Uint32 ptrI = conf->getConnectionPtr();
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
alterTrigReply(signal, ptr);
}
void
Backup::execALTER_TRIG_REF(Signal* signal)
{
jamEntry();
AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtr();
const Uint32 ptrI = ref->getConnectionPtr();
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
ptr.p->setErrorCode(ref->getErrorCode());
alterTrigReply(signal, ptr);
}
void
Backup::alterTrigReply(Signal* signal, BackupRecordPtr ptr)
{
CRASH_INSERTION((10005));
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_ALTER_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ptr.p->masterData.sendCounter--;
if(ptr.p->masterData.sendCounter.done() == false){
jam();
return;
}//if
if(ptr.p->checkError()){
jam();
masterAbort(signal, ptr);
return;
}//if
sendAlterTrig(signal, ptr);
}
void
Backup::execWAIT_GCP_REF(Signal* signal)
{
......@@ -1720,7 +1614,12 @@ Backup::execWAIT_GCP_CONF(Signal* signal){
{
CRASH_INSERTION((10009));
ptr.p->stopGCP = gcp;
sendDropTrig(signal, ptr); // regular dropping of triggers
/**
* Backup is complete - begin cleanup
* STOP_BACKUP_REQ is sent to participants.
* They then drop the local triggers
*/
sendStopBackup(signal, ptr);
return;
}//if
......@@ -1928,7 +1827,7 @@ err:
/*****************************************************************************
*
* Master functionallity - Drop triggers
* Slave functionallity - Drop triggers
*
*****************************************************************************/
......@@ -1936,23 +1835,63 @@ void
Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr)
{
TablePtr tabPtr;
if (ptr.p->masterData.dropTrig.tableId == RNIL) {
ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ;
if (ptr.p->slaveData.dropTrig.tableId == RNIL) {
jam();
ptr.p->tables.first(tabPtr);
} else {
jam();
ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.dropTrig.tableId));
ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.dropTrig.tableId));
ptr.p->tables.next(tabPtr);
}//if
if (tabPtr.i != RNIL) {
jam();
sendDropTrig(signal, ptr, tabPtr);
} else {
jam();
ptr.p->masterData.dropTrig.tableId = RNIL;
/**
* Insert footers
*/
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1));
* dst = 0;
filePtr.p->operation.dataBuffer.updateWritePtr(1);
}
sendStopBackup(signal, ptr);
}//if
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2;
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz));
BackupFormat::CtlFile::GCPEntry * gcp =
(BackupFormat::CtlFile::GCPEntry*)dst;
gcp->SectionType = htonl(BackupFormat::GCP_ENTRY);
gcp->SectionLength = htonl(gcpSz);
gcp->StartGCP = htonl(ptr.p->startGCP);
gcp->StopGCP = htonl(ptr.p->stopGCP - 1);
filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
}
{ // UNLOCK while dropping trigger for better timeslicing
TablePtr tabPtr;
for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
ptr.p->tables.next(tabPtr))
{
signal->theData[0] = tabPtr.p->tableId;
signal->theData[1] = 0; // unlock
EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
}
}
closeFiles(signal, ptr);
}
}
void
......@@ -1961,40 +1900,26 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr)
jam();
DropTrigReq * req = (DropTrigReq *)signal->getDataPtrSend();
ptr.p->masterData.gsn = GSN_DROP_TRIG_REQ;
ptr.p->masterData.sendCounter = 0;
ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ;
ptr.p->slaveData.trigSendCounter = 0;
req->setConnectionPtr(ptr.i);
req->setUserRef(reference()); // Sending to myself
req->setRequestType(DropTrigReq::RT_USER);
req->setIndexId(RNIL);
req->setTriggerInfo(0); // not used on DROP via DICT
char triggerName[MAX_TAB_NAME_SIZE];
Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2);
LinearSectionPtr lsPtr[3];
req->setTriggerInfo(0); // not used on DROP
req->setTriggerType(TriggerType::SUBSCRIPTION);
req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
ptr.p->masterData.dropTrig.tableId = tabPtr.p->tableId;
ptr.p->slaveData.dropTrig.tableId = tabPtr.p->tableId;
req->setTableId(tabPtr.p->tableId);
for (int i = 0; i < 3; i++) {
Uint32 id = tabPtr.p->triggerIds[i];
req->setTriggerId(id);
if (id != ILLEGAL_TRIGGER_ID) {
sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ,
req->setTriggerEvent(triggerEventValues[i]);
sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ,
signal, DropTrigReq::SignalLength, JBB);
} else {
BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
ptr.p->backupId, tabPtr.p->tableId);
w.reset();
w.add(CreateTrigReq::TriggerNameKey, triggerName);
lsPtr[0].p = nameBuffer;
lsPtr[0].sz = w.getWordsUsed();
sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ,
signal, DropTrigReq::SignalLength, JBB, lsPtr, 1);
}
ptr.p->masterData.sendCounter ++;
ptr.p->slaveData.trigSendCounter ++;
}
}
......@@ -2009,7 +1934,9 @@ Backup::execDROP_TRIG_REF(Signal* signal)
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
//ndbrequire(ref->getErrorCode() == DropTrigRef::NoSuchTrigger);
ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId();
ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl;
dropTrigReply(signal, ptr);
}
......@@ -2020,6 +1947,7 @@ Backup::execDROP_TRIG_CONF(Signal* signal)
DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr();
const Uint32 ptrI = conf->getConnectionPtr();
const Uint32 triggerId= conf->getTriggerId();
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
......@@ -2030,15 +1958,14 @@ Backup::execDROP_TRIG_CONF(Signal* signal)
void
Backup::dropTrigReply(Signal* signal, BackupRecordPtr ptr)
{
CRASH_INSERTION((10012));
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_DROP_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ndbrequire(ptr.p->slaveData.gsn == GSN_DROP_TRIG_REQ);
ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false);
ptr.p->masterData.sendCounter--;
if(ptr.p->masterData.sendCounter.done() == false){
// move from .masterData to .slaveData
ptr.p->slaveData.trigSendCounter--;
if(ptr.p->slaveData.trigSendCounter.done() == false){
jam();
return;
}//if
......@@ -2165,6 +2092,9 @@ Backup::masterAbort(Signal* signal, BackupRecordPtr ptr)
#ifdef DEBUG_ABORT
ndbout_c("************ masterAbort");
#endif
ndbassert(ptr.p->masterRef == reference());
if(ptr.p->masterData.errorCode != 0)
{
jam();
......@@ -2208,13 +2138,13 @@ Backup::masterAbort(Signal* signal, BackupRecordPtr ptr)
case GSN_BACKUP_FRAGMENT_REQ:
jam();
ptr.p->stopGCP= ptr.p->startGCP + 1;
sendDropTrig(signal, ptr); // dropping due to error
sendStopBackup(signal, ptr); // dropping due to error
return;
case GSN_UTIL_SEQUENCE_REQ:
case GSN_UTIL_LOCK_REQ:
case GSN_DROP_TRIG_REQ:
ndbrequire(false);
return;
case GSN_DROP_TRIG_REQ:
case GSN_STOP_BACKUP_REQ:
return;
}
......@@ -2329,6 +2259,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal)
ptr.p->m_gsn = GSN_DEFINE_BACKUP_REQ;
ptr.p->slaveState.forceState(INITIAL);
ptr.p->slaveState.setState(DEFINING);
ptr.p->slaveData.dropTrig.tableId = RNIL;
ptr.p->errorCode = 0;
ptr.p->clientRef = req->clientRef;
ptr.p->clientData = req->clientData;
......@@ -2345,14 +2276,14 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal)
ptr.p->backupKey[0] = req->backupKey[0];
ptr.p->backupKey[1] = req->backupKey[1];
ptr.p->backupDataLen = req->backupDataLen;
ptr.p->masterData.dropTrig.tableId = RNIL;
ptr.p->masterData.alterTrig.tableId = RNIL;
ptr.p->masterData.errorCode = 0;
ptr.p->noOfBytes = 0;
ptr.p->noOfRecords = 0;
ptr.p->noOfLogBytes = 0;
ptr.p->noOfLogRecords = 0;
ptr.p->currGCP = 0;
ptr.p->startGCP = 0;
ptr.p->stopGCP = 0;
/**
* Allocate files
......@@ -3264,8 +3195,6 @@ Backup::execSTART_BACKUP_REQ(Signal* signal)
StartBackupReq* req = (StartBackupReq*)signal->getDataPtr();
const Uint32 ptrI = req->backupPtr;
//const Uint32 backupId = req->backupId;
const Uint32 signalNo = req->signalNo;
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
......@@ -3273,45 +3202,6 @@ Backup::execSTART_BACKUP_REQ(Signal* signal)
ptr.p->slaveState.setState(STARTED);
ptr.p->m_gsn = GSN_START_BACKUP_REQ;
for(Uint32 i = 0; i<req->noOfTableTriggers; i++) {
jam();
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, req->tableTriggers[i].tableId));
for(Uint32 j = 0; j<3; j++) {
jam();
const Uint32 triggerId = req->tableTriggers[i].triggerIds[j];
tabPtr.p->triggerIds[j] = triggerId;
TriggerPtr trigPtr;
if(!ptr.p->triggers.seizeId(trigPtr, triggerId)) {
jam();
ptr.p->m_gsn = GSN_START_BACKUP_REF;
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend();
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->signalNo = signalNo;
ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord;
ref->nodeId = getOwnNodeId();
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal,
StartBackupRef::SignalLength, JBB);
return;
}//if
tabPtr.p->triggerAllocated[j] = true;
trigPtr.p->backupPtr = ptr.i;
trigPtr.p->tableId = tabPtr.p->tableId;
trigPtr.p->tab_ptr_i = tabPtr.i;
trigPtr.p->logEntry = 0;
trigPtr.p->event = j;
trigPtr.p->maxRecordSize = 2048;
trigPtr.p->operation =
&ptr.p->files.getPtr(ptr.p->logFilePtr)->operation;
trigPtr.p->operation->noOfBytes = 0;
trigPtr.p->operation->noOfRecords = 0;
trigPtr.p->errorCode = 0;
}//for
}//for
/**
* Start file threads...
*/
......@@ -3329,13 +3219,12 @@ Backup::execSTART_BACKUP_REQ(Signal* signal)
}//if
}//for
ptr.p->m_gsn = GSN_START_BACKUP_CONF;
StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend();
conf->backupPtr = ptr.i;
conf->backupId = ptr.p->backupId;
conf->signalNo = signalNo;
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal,
StartBackupConf::SignalLength, JBB);
/**
* Tell DBTUP to create triggers
*/
TablePtr tabPtr;
ndbrequire(ptr.p->tables.first(tabPtr));
sendCreateTrig(signal, ptr, tabPtr);
}
/*****************************************************************************
......@@ -3990,10 +3879,13 @@ Backup::execBACKUP_TRIG_REQ(Signal* signal)
Uint32 result;
jamEntry();
c_triggerPool.getPtr(trigPtr, trigger_id);
c_tablePool.getPtr(tabPtr, trigPtr.p->tab_ptr_i);
tabPtr.p->fragments.getPtr(fragPtr, frag_id);
if (fragPtr.p->node != getOwnNodeId()) {
jam();
result = ZFALSE;
} else {
......@@ -4061,10 +3953,21 @@ Backup::execTRIG_ATTRINFO(Signal* signal) {
trigPtr.p->logEntry = logEntry;
logEntry->Length = 0;
logEntry->TableId = htonl(trigPtr.p->tableId);
logEntry->TriggerEvent = htonl(trigPtr.p->event);
if(trigPtr.p->event==0)
logEntry->TriggerEvent= htonl(TriggerEvent::TE_INSERT);
else if(trigPtr.p->event==1)
logEntry->TriggerEvent= htonl(TriggerEvent::TE_UPDATE);
else if(trigPtr.p->event==2)
logEntry->TriggerEvent= htonl(TriggerEvent::TE_DELETE);
else {
ndbout << "Bad Event: " << trigPtr.p->event << endl;
ndbrequire(false);
}
} else {
ndbrequire(logEntry->TableId == htonl(trigPtr.p->tableId));
ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event));
// ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event));
}//if
const Uint32 pos = logEntry->Length;
......@@ -4103,12 +4006,11 @@ Backup::execFIRE_TRIG_ORD(Signal* signal)
if(gci != ptr.p->currGCP)
{
jam();
trigPtr.p->logEntry->TriggerEvent = htonl(trigPtr.p->event | 0x10000);
trigPtr.p->logEntry->TriggerEvent|= htonl(0x10000);
trigPtr.p->logEntry->Data[len] = htonl(gci);
len++;
ptr.p->currGCP = gci;
}//if
}
len += (sizeof(BackupFormat::LogFile::LogEntry) >> 2) - 2;
trigPtr.p->logEntry->Length = htonl(len);
......@@ -4174,50 +4076,13 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal)
ptr.p->slaveState.setState(STOPPING);
ptr.p->m_gsn = GSN_STOP_BACKUP_REQ;
ptr.p->startGCP= startGCP;
ptr.p->stopGCP= stopGCP;
/**
* Insert footers
* Destroy the triggers in local DBTUP we created
*/
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1));
* dst = 0;
filePtr.p->operation.dataBuffer.updateWritePtr(1);
}
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2;
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz));
BackupFormat::CtlFile::GCPEntry * gcp =
(BackupFormat::CtlFile::GCPEntry*)dst;
gcp->SectionType = htonl(BackupFormat::GCP_ENTRY);
gcp->SectionLength = htonl(gcpSz);
gcp->StartGCP = htonl(startGCP);
gcp->StopGCP = htonl(stopGCP - 1);
filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
}
{
TablePtr tabPtr;
for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
ptr.p->tables.next(tabPtr))
{
signal->theData[0] = tabPtr.p->tableId;
signal->theData[1] = 0; // unlock
EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
}
}
closeFiles(signal, ptr);
sendDropTrig(signal, ptr);
}
void
......@@ -4484,18 +4349,9 @@ Backup::execABORT_BACKUP_ORD(Signal* signal)
ptr.p->nodes.clear();
ptr.p->nodes.set(getOwnNodeId());
if(ref == reference())
{
ptr.p->stopGCP= ptr.p->startGCP + 1;
sendDropTrig(signal, ptr);
}
else
{
ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ;
ptr.p->masterData.sendCounter.clearWaitingFor();
ptr.p->masterData.sendCounter.setWaitingFor(getOwnNodeId());
closeFiles(signal, ptr);
}
sendStopBackup(signal, ptr);
}
......
......@@ -96,8 +96,6 @@ protected:
void execGET_TABINFO_CONF(Signal* signal);
void execCREATE_TRIG_REF(Signal* signal);
void execCREATE_TRIG_CONF(Signal* signal);
void execALTER_TRIG_REF(Signal* signal);
void execALTER_TRIG_CONF(Signal* signal);
void execDROP_TRIG_REF(Signal* signal);
void execDROP_TRIG_CONF(Signal* signal);
......@@ -426,6 +424,7 @@ public:
Uint32 clientRef;
Uint32 clientData;
Uint32 flags;
Uint32 signalNo;
Uint32 backupId;
Uint32 backupKey[2];
Uint32 masterRef;
......@@ -452,6 +451,17 @@ public:
Array<Page32> pages; // Used for (un)packing backup request
SimpleProperties props;// Used for (un)packing backup request
struct SlaveData {
SignalCounter trigSendCounter;
Uint32 gsn;
struct {
Uint32 tableId;
} createTrig;
struct {
Uint32 tableId;
} dropTrig;
} slaveData;
struct MasterData {
MasterData(Backup & b)
{
......@@ -462,15 +472,6 @@ public:
Uint32 gsn;
SignalCounter sendCounter;
Uint32 errorCode;
struct {
Uint32 tableId;
} createTrig;
struct {
Uint32 tableId;
} dropTrig;
struct {
Uint32 tableId;
} alterTrig;
union {
struct {
Uint32 startBackup;
......@@ -563,7 +564,7 @@ public:
void defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
void createTrigReply(Signal* signal, BackupRecordPtr ptr);
void alterTrigReply(Signal* signal, BackupRecordPtr ptr);
void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32, Uint32);
void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32);
void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0);
......
......@@ -25,15 +25,12 @@ BACKUP_REQ
<-------------------------------
BACKUP_CONF
<----------------
CREATE_TRIG
--------------> (If master crashes here -> rouge triggers/memory leak)
<--------------
START_BACKUP
------------------------------>
<------------------------------
ALTER_TRIG
CREATE_TRIG
-------------->
<--------------
<------------------------------
WAIT_GCP
-------------->
<--------------
......@@ -46,11 +43,11 @@ BACKUP_CONF
WAIT_GCP
-------------->
<--------------
STOP_BACKUP
------------------------------>
DROP_TRIG
-------------->
<--------------
STOP_BACKUP
------------------------------>
<------------------------------
BACKUP_COMPLETE_REP
<----------------
......
......@@ -62,9 +62,6 @@ Backup::Backup(const Configuration & conf) :
addRecSignal(GSN_CREATE_TRIG_REF, &Backup::execCREATE_TRIG_REF);
addRecSignal(GSN_CREATE_TRIG_CONF, &Backup::execCREATE_TRIG_CONF);
addRecSignal(GSN_ALTER_TRIG_REF, &Backup::execALTER_TRIG_REF);
addRecSignal(GSN_ALTER_TRIG_CONF, &Backup::execALTER_TRIG_CONF);
addRecSignal(GSN_DROP_TRIG_REF, &Backup::execDROP_TRIG_REF);
addRecSignal(GSN_DROP_TRIG_CONF, &Backup::execDROP_TRIG_CONF);
......
......@@ -777,6 +777,10 @@ struct TupTriggerData {
/**
* Trigger id, used by DICT/TRIX to identify the trigger
*
* trigger Ids are unique per block for SUBSCRIPTION triggers.
* This is so that BACKUP can use TUP triggers directly and delete them
* properly.
*/
Uint32 triggerId;
......@@ -2012,7 +2016,9 @@ private:
bool createTrigger(Tablerec* table, const CreateTrigReq* req);
Uint32 dropTrigger(Tablerec* table, const DropTrigReq* req);
Uint32 dropTrigger(Tablerec* table,
const DropTrigReq* req,
BlockNumber sender);
void
checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct,
......
......@@ -186,7 +186,7 @@ Dbtup::execDROP_TRIG_REQ(Signal* signal)
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
// Drop trigger
Uint32 r = dropTrigger(tabPtr.p, req);
Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(senderRef));
if (r == 0){
// Send conf
DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
......@@ -318,7 +318,7 @@ Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId)
/* */
/* ---------------------------------------------------------------- */
Uint32
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender)
{
if (ERROR_INSERTED(4004)) {
CLEAR_ERROR_INSERT_VALUE;
......@@ -330,7 +330,7 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
TriggerActionTime::Value ttime = req->getTriggerActionTime();
TriggerEvent::Value tevent = req->getTriggerEvent();
// ndbout_c("Drop TupTrigger %u = %u %u %u %u", triggerId, table, ttype, ttime, tevent);
// ndbout_c("Drop TupTrigger %u = %u %u %u %u by %u", triggerId, table, ttype, ttime, tevent, sender);
ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
ndbrequire(tlist != NULL);
......@@ -339,6 +339,19 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
ljam();
if (ptr.p->triggerId == triggerId) {
if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock)
{
/**
* You can only drop your own triggers for subscription triggers.
* Trigger IDs are private for each block.
*
* SUMA encodes information in the triggerId
*
* Backup doesn't really care about the Ids though.
*/
ljam();
continue;
}
ljam();
tlist->release(ptr.i);
return 0;
......
......@@ -193,7 +193,7 @@ runDDL(NDBT_Context* ctx, NDBT_Step* step){
}
int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){
int runDropTablesRestart(NDBT_Context* ctx, NDBT_Step* step){
NdbRestarter restarter;
Ndb* pNdb = GETNDB(step);
......@@ -201,7 +201,7 @@ int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table *tab = ctx->getTab();
pNdb->getDictionary()->dropTable(tab->getName());
if (restarter.restartAll(true) != 0)
if (restarter.restartAll(false) != 0)
return NDBT_FAILED;
if (restarter.waitClusterStarted() != 0)
......@@ -406,6 +406,7 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
// TEMPORARY FIX
// To erase all tables from cache(s)
// To be removed, maybe replaced by ndb.invalidate();
runDropTable(ctx,step);
{
Bank bank(ctx->m_cluster_connection);
......@@ -416,8 +417,8 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
}
// END TEMPORARY FIX
ndbout << "Performing initial restart" << endl;
if (restarter.restartAll(true) != 0)
ndbout << "Performing restart" << endl;
if (restarter.restartAll(false) != 0)
return NDBT_FAILED;
if (restarter.waitClusterStarted() != 0)
......@@ -465,12 +466,12 @@ TESTCASE("BackupOne",
"Test that backup and restore works on one table \n"
"1. Load table\n"
"2. Backup\n"
"3. Restart -i\n"
"3. Drop tables and restart \n"
"4. Restore\n"
"5. Verify count and content of table\n"){
INITIALIZER(runLoadTable);
INITIALIZER(runBackupOne);
INITIALIZER(runRestartInitial);
INITIALIZER(runDropTablesRestart);
INITIALIZER(runRestoreOne);
VERIFIER(runVerifyOne);
FINALIZER(runClearTable);
......
......@@ -199,7 +199,6 @@ int
NFDuringBackupM_codes[] = {
10003,
10004,
10005,
10007,
10008,
10009,
......@@ -349,6 +348,7 @@ NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz,
int
FailS_codes[] = {
10025,
10027,
10033
};
......