Commit ccc4681d authored by unknown

Merge bk-internal.mysql.com:/home/bk/mysql-5.1-new

into serg.mylan:/usr/home/serg/Abk/mysql-5.1

parents dd918975 4234038b
......@@ -377,7 +377,8 @@ static struct my_option my_long_options[] =
{"create-schema", OPT_CREATE_SLAP_SCHEMA, "Schema to run tests in.",
(gptr*) &create_schema_string, (gptr*) &create_schema_string, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"csv", OPT_CREATE_SLAP_SCHEMA, "Schema to run tests in.",
{"csv", OPT_CREATE_SLAP_SCHEMA,
"Generate CSV output to named file or to stdout if no file is named.",
(gptr*) &opt_csv_str, (gptr*) &opt_csv_str, 0, GET_STR,
OPT_ARG, 0, 0, 0, 0, 0, 0},
{"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
......
......@@ -39,7 +39,10 @@ then
AC_DEFINE([$5])
mysql_se_decls="${mysql_se_decls},$6"
mysql_se_htons="${mysql_se_htons},&$6"
mysql_se_objs="$mysql_se_objs $8"
if test "$8" != "no"
then
mysql_se_objs="$mysql_se_objs $8"
fi
mysql_se_dirs="$mysql_se_dirs $7"
mysql_se_libs="$mysql_se_libs $9"
else
......
......@@ -2437,8 +2437,8 @@ MYSQL_STORAGE_ENGINE(archive,,,,,,storage/archive,,
\$(top_builddir)/storage/archive/libarchive.a, [
AC_CONFIG_FILES(storage/archive/Makefile)
])
MYSQL_STORAGE_ENGINE(csv,,,"yes",,tina_hton,storage/csv,
../storage/csv/ha_tina.o,,[
MYSQL_STORAGE_ENGINE(csv,,,"yes",,tina_hton,storage/csv,no,
\$(top_builddir)/storage/csv/libcsv.a,[
AC_CONFIG_FILES(storage/csv/Makefile)
])
MYSQL_STORAGE_ENGINE(blackhole)
......
......@@ -296,7 +296,10 @@ int ha_myisam::dump(THD* thd, int fd)
#endif /* HAVE_REPLICATION */
bool ha_myisam::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
bool ha_myisam::check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread)
{
/*
To be able to open and lock for reading system tables like 'mysql.proc',
......
......@@ -60,7 +60,10 @@ class ha_myisam: public handler
uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; }
uint checksum() const;
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count);
virtual bool check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
......
......@@ -1425,6 +1425,12 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
** General handler functions
****************************************************************************/
void handler::ha_statistic_increment(ulong SSV::*offset) const
{
statistic_increment(table->in_use->status_var.*offset, &LOCK_status);
}
/*
Open database-handler.
......
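The new handler::ha_statistic_increment wrapper relies on a C++ pointer-to-data-member: the caller names a counter in the per-session status struct and the wrapper resolves it against table->in_use. A minimal standalone sketch of that pattern follows; StatusVars/Session/Handler are stand-ins for system_status_var/THD/handler, and the LOCK_status mutex is omitted.

```cpp
// Sketch of the pointer-to-member counter pattern behind
// handler::ha_statistic_increment. Illustrative types only; the real
// server additionally serializes the increment with LOCK_status.
struct StatusVars {
  unsigned long ha_write_count;
  unsigned long ha_read_rnd_next_count;
};

struct Session {
  StatusVars status_var;
};

struct Handler {
  Session *in_use;  // stands in for table->in_use

  void ha_statistic_increment(unsigned long StatusVars::*offset) const {
    in_use->status_var.*offset += 1;  // LOCK_status omitted in this sketch
  }
};

int main() {
  Session s = {};
  Handler h = {&s};
  h.ha_statistic_increment(&StatusVars::ha_write_count);
  return s.status_var.ha_write_count == 1 ? 0 : 1;  // exit 0 on success
}
```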
......@@ -1006,6 +1006,8 @@ typedef struct st_handler_buffer
byte *end_of_used_area; /* End of area that was used by handler */
} HANDLER_BUFFER;
typedef struct system_status_var SSV;
class handler :public Sql_alloc
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
......@@ -1027,6 +1029,9 @@ class handler :public Sql_alloc
virtual int rnd_init(bool scan) =0;
virtual int rnd_end() { return 0; }
void ha_statistic_increment(ulong SSV::*offset) const;
private:
virtual int reset() { return extra(HA_EXTRA_RESET); }
public:
......@@ -1109,7 +1114,10 @@ class handler :public Sql_alloc
TRUE Locking is allowed
FALSE Locking is not allowed. The error was thrown.
*/
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
virtual bool check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread)
{
return TRUE;
}
......
......@@ -1932,7 +1932,7 @@ class Item_int_with_ref :public Item_int
virtual Item *real_item() { return ref; }
};
#ifdef MYSQL_SERVER
#include "gstream.h"
#include "spatial.h"
#include "item_sum.h"
......@@ -1945,6 +1945,7 @@ class Item_int_with_ref :public Item_int
#include "item_uniq.h"
#include "item_subselect.h"
#include "item_xmlfunc.h"
#endif
class Item_copy_string :public Item
{
......
......@@ -617,8 +617,11 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
Check if we can lock the table. For some tables we cannot do that
because of handler-specific locking issues.
*/
if (!table_ptr[i]->file->check_if_locking_is_allowed(thd, table_ptr[i],
count))
if (!table_ptr[i]->file->
check_if_locking_is_allowed(thd->lex->sql_command, thd->lex->type,
table_ptr[i], count,
(thd == logger.get_general_log_thd()) ||
(thd == logger.get_slow_log_thd())))
return 0;
}
......
......@@ -477,6 +477,11 @@ inline THD *_current_thd(void)
}
#define current_thd _current_thd()
/* below functions are required for plugins as THD class is opaque */
my_bool thd_in_lock_tables(const THD *thd);
my_bool thd_tablespace_op(const THD *thd);
const char *thd_proc_info(THD *thd, const char *info);
/*
External variables
*/
......@@ -507,7 +512,9 @@ enum enum_var_type
class sys_var;
#include "item.h"
extern my_decimal decimal_zero;
#ifdef MYSQL_SERVER
typedef Comp_creator* (*chooser_compare_func_creator)(bool invert);
#endif
/* sql_parse.cc */
void free_items(Item *item);
void cleanup_items(Item *item);
......@@ -545,6 +552,7 @@ Item *negate_expression(THD *thd, Item *expr);
#include "sql_class.h"
#include "sql_acl.h"
#include "tztime.h"
#ifdef MYSQL_SERVER
#include "opt_range.h"
#ifdef HAVE_QUERY_CACHE
......@@ -841,6 +849,8 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
Field *
find_field_in_table_sef(TABLE *table, const char *name);
#endif /* MYSQL_SERVER */
#ifdef HAVE_OPENSSL
#include <openssl/des.h>
struct st_des_keyblock
......@@ -858,6 +868,7 @@ extern pthread_mutex_t LOCK_des_key_file;
bool load_des_key_file(const char *file_name);
#endif /* HAVE_OPENSSL */
#ifdef MYSQL_SERVER
/* sql_do.cc */
bool mysql_do(THD *thd, List<Item> &values);
......@@ -1169,6 +1180,7 @@ int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length);
int key_rec_cmp(void *key_info, byte *a, byte *b);
bool init_errmessage(void);
#endif /* MYSQL_SERVER */
void sql_perror(const char *message);
int vprint_msg_to_log(enum loglevel level, const char *format, va_list args);
......@@ -1191,6 +1203,7 @@ bool general_log_print(THD *thd, enum enum_server_command command,
bool fn_format_relative_to_data_home(my_string to, const char *name,
const char *dir, const char *extension);
#ifdef MYSQL_SERVER
File open_binlog(IO_CACHE *log, const char *log_file_name,
const char **errmsg);
......@@ -1739,4 +1752,5 @@ inline void kill_delayed_threads(void) {}
#define check_stack_overrun(A, B, C) 0
#endif
#endif /* MYSQL_SERVER */
#endif /* MYSQL_CLIENT */
......@@ -167,6 +167,25 @@ Open_tables_state::Open_tables_state(ulong version_arg)
reset_open_tables_state();
}
my_bool thd_in_lock_tables(const THD *thd)
{
return thd->in_lock_tables;
}
my_bool thd_tablespace_op(const THD *thd)
{
return thd->tablespace_op;
}
const char *thd_proc_info(THD *thd, const char *info)
{
const char *old_info= thd->proc_info;
thd->proc_info= info;
return old_info;
}
/*
Pass nominal parameters to Statement constructor only to ensure that
......
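These three accessors exist so plugins can query a THD without seeing its layout; thd_proc_info doubles as a save-and-restore hook because it returns the previous value. A hedged sketch of how a plugin might use it, with a stub OpaqueSession standing in for the opaque THD and the accessor reimplemented so the sketch is self-contained:

```cpp
// Illustrative only: OpaqueSession is a stand-in for the opaque THD.
#include <cstdio>

struct OpaqueSession {        // plugin never sees the real layout
  const char *proc_info;
};

const char *thd_proc_info(OpaqueSession *thd, const char *info) {
  const char *old_info = thd->proc_info;  // mirrors the diff: return old value
  thd->proc_info = info;
  return old_info;
}

// Hypothetical plugin code: publish a stage, then restore the previous one.
void plugin_do_work(OpaqueSession *thd) {
  const char *saved = thd_proc_info(thd, "plugin: doing work");
  /* ... long-running work ... */
  thd_proc_info(thd, saved);
}

int main() {
  OpaqueSession s = {"init"};
  plugin_do_work(&s);
  std::printf("%s\n", s.proc_info);  // prints "init" again
}
```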
......@@ -171,6 +171,7 @@ class LEX_COLUMN : public Sql_alloc
class delayed_insert;
class select_result;
class Time_zone;
#define THD_SENTRY_MAGIC 0xfeedd1ff
#define THD_SENTRY_GONE 0xdeadbeef
......@@ -344,6 +345,8 @@ typedef struct system_status_var
#define last_system_status_var com_stmt_close
#ifdef MYSQL_SERVER
void free_tmp_table(THD *thd, TABLE *entry);
......@@ -354,7 +357,6 @@ void free_tmp_table(THD *thd, TABLE *entry);
#define INIT_ARENA_DBUG_INFO
#endif
class Query_arena
{
public:
......@@ -1905,3 +1907,5 @@ class select_dumpvar :public select_result_interceptor {
/* Functions in sql_class.cc */
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var);
#endif /* MYSQL_SERVER */
......@@ -29,6 +29,7 @@ class st_alter_tablespace;
class partition_info;
class event_timed;
#ifdef MYSQL_SERVER
/*
The following hack is needed because mysql_yacc.cc does not define
YYSTYPE before including this file
......@@ -43,6 +44,7 @@ class event_timed;
#include "sql_yacc.h"
#define LEX_YYSTYPE YYSTYPE *
#endif
#endif
/*
When a command is added here, be sure it's also added in mysqld.cc
......@@ -115,6 +117,8 @@ enum enum_sql_command {
*/
#define DESCRIBE_PARTITIONS 4
#ifdef MYSQL_SERVER
enum enum_sp_suid_behaviour
{
SP_IS_DEFAULT_SUID= 0,
......@@ -1109,3 +1113,5 @@ extern int yylex(void *arg, void *yythd);
extern pthread_key(LEX*,THR_LEX);
#define current_lex (current_thd->lex)
#endif
......@@ -24,17 +24,16 @@ INCLUDES = -I$(top_srcdir)/include \
-I$(top_srcdir)/regex \
-I$(top_srcdir)/sql \
-I$(srcdir)
WRAPLIBS=
pkglib_LTLIBRARIES = ha_csv.la
ha_csv_la_LDFLAGS = -module
noinst_HEADERS = ha_tina.h
ha_csv_la_SOURCES = ha_tina.cc
pkglib_LIBRARIES = libcsv.a
LDADD =
DEFS = -DMYSQL_SERVER @DEFS@
DEFS = @DEFS@
libcsv_a_CXXFLAGS = $(AM_CFLAGS)
noinst_HEADERS = ha_tina.h
libcsv_a_SOURCES = ha_tina.cc
# Don't update the files from bitkeeper
%::SCCS/s.%
......@@ -590,7 +590,10 @@ void ha_tina::update_status()
}
bool ha_tina::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
bool ha_tina::check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread)
{
/*
Deny locking of the log tables, which is incompatible with
......@@ -598,11 +601,10 @@ bool ha_tina::check_if_locking_is_allowed(THD *thd, TABLE *table, uint count)
general_log_thd or slow_log_thd.
*/
if (table->s->log_table &&
thd->lex->sql_command != SQLCOM_TRUNCATE &&
!(thd->lex->sql_command == SQLCOM_FLUSH &&
thd->lex->type & REFRESH_LOG) &&
(thd != logger.get_general_log_thd()) &&
(thd != logger.get_slow_log_thd()) &&
sql_command != SQLCOM_TRUNCATE &&
!(sql_command == SQLCOM_FLUSH &&
type & REFRESH_LOG) &&
!called_by_logger_thread &&
(table->reginfo.lock_type >= TL_READ_NO_INSERT))
{
/*
......@@ -665,7 +667,7 @@ int ha_tina::write_row(byte * buf)
int size;
DBUG_ENTER("ha_tina::write_row");
statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
......@@ -714,9 +716,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int size;
DBUG_ENTER("ha_tina::update_row");
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
......@@ -751,8 +751,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int ha_tina::delete_row(const byte * buf)
{
DBUG_ENTER("ha_tina::delete_row");
statistic_increment(table->in_use->status_var.ha_delete_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_delete_count);
if (chain_append())
DBUG_RETURN(-1);
......@@ -903,8 +902,7 @@ int ha_tina::rnd_next(byte *buf)
{
DBUG_ENTER("ha_tina::rnd_next");
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= next_position;
if (!share->mapped_file)
......@@ -941,8 +939,7 @@ void ha_tina::position(const byte *record)
int ha_tina::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_tina::rnd_pos");
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
&LOCK_status);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= my_get_ptr(pos,ref_length);
DBUG_RETURN(find_current_row(buf));
}
......
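The rewritten check no longer dereferences THD: the server pre-computes sql_command, the FLUSH type bits, and the logger-thread flag, and the engine applies a pure predicate. A condensed, self-contained sketch of ha_tina's log-table rule after this change; the enum values are placeholders for SQLCOM_TRUNCATE, SQLCOM_FLUSH, REFRESH_LOG and TL_READ_NO_INSERT, not the server's real symbols.

```cpp
// Condensed version of ha_tina::check_if_locking_is_allowed's decision,
// expressed over plain values instead of THD internals.
enum SqlCommand { SQLCOM_TRUNCATE_X, SQLCOM_FLUSH_X, SQLCOM_OTHER_X };
const unsigned long REFRESH_LOG_X = 0x1;

bool locking_allowed(bool is_log_table, SqlCommand sql_command,
                     unsigned long flush_type, bool called_by_logger_thread,
                     bool wants_strong_lock) {
  if (is_log_table &&
      sql_command != SQLCOM_TRUNCATE_X &&
      !(sql_command == SQLCOM_FLUSH_X && (flush_type & REFRESH_LOG_X)) &&
      !called_by_logger_thread &&
      wants_strong_lock)
    return false;  // deny: would lock an active log table
  return true;
}

int main() {
  // TRUNCATE on a log table is still allowed; a plain strong lock is not.
  return (locking_allowed(true, SQLCOM_TRUNCATE_X, 0, false, true) &&
          !locking_allowed(true, SQLCOM_OTHER_X, 0, false, true)) ? 0 : 1;
}
```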
......@@ -104,7 +104,10 @@ class ha_tina: public handler
*/
ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; }
virtual bool check_if_locking_is_allowed(THD *thd, TABLE *table, uint count);
virtual bool check_if_locking_is_allowed(uint sql_command,
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread);
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
......
......@@ -139,21 +139,11 @@ class StartBackupReq {
friend bool printSTART_BACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( MaxTableTriggers = 4 );
STATIC_CONST( HeaderLength = 5 );
STATIC_CONST( TableTriggerLength = 4);
STATIC_CONST( SignalLength = 2 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
Uint32 noOfSignals;
Uint32 noOfTableTriggers;
struct TableTriggers {
Uint32 tableId;
Uint32 triggerIds[3];
} tableTriggers[MaxTableTriggers];
};
class StartBackupRef {
......@@ -169,7 +159,7 @@ class StartBackupRef {
friend bool printSTART_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 5 );
STATIC_CONST( SignalLength = 4 );
enum ErrorCode {
FailedToAllocateTriggerRecord = 1
......@@ -177,7 +167,6 @@ public:
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
Uint32 errorCode;
Uint32 nodeId;
};
......@@ -195,12 +184,11 @@ class StartBackupConf {
friend bool printSTART_BACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 3 );
STATIC_CONST( SignalLength = 2 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 signalNo;
};
class BackupFragmentReq {
......
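The SignalLength constants count 32-bit words of the signal payload, so dropping signalNo (and, for StartBackupReq, the whole table-trigger array) shrinks the declared lengths. A small sanity-check sketch under that assumption, mirroring the StartBackupConf change (3 words down to 2):

```cpp
// Hedged sketch: assumes NDB signal lengths are word counts over the payload.
#include <cstdint>

struct StartBackupConfOld { std::uint32_t backupId, backupPtr, signalNo; };
struct StartBackupConfNew { std::uint32_t backupId, backupPtr; };

static_assert(sizeof(StartBackupConfOld) / sizeof(std::uint32_t) == 3,
              "old SignalLength");
static_assert(sizeof(StartBackupConfNew) / sizeof(std::uint32_t) == 2,
              "new SignalLength");

int main() { return 0; }
```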
......@@ -48,16 +48,8 @@ printDEFINE_BACKUP_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
bool
printSTART_BACKUP_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
StartBackupReq* sig = (StartBackupReq*)data;
fprintf(out, " backupPtr: %d backupId: %d signalNo: %d of %d\n",
sig->backupPtr, sig->backupId,
sig->signalNo + 1, sig->noOfSignals);
for(Uint32 i = 0; i<sig->noOfTableTriggers; i++)
fprintf(out,
" Table: %d Triggers = [ insert: %d update: %d delete: %d ]\n",
sig->tableTriggers[i].tableId,
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_INSERT],
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_UPDATE],
sig->tableTriggers[i].triggerIds[TriggerEvent::TE_DELETE]);
fprintf(out, " backupPtr: %d backupId: %d\n",
sig->backupPtr, sig->backupId);
return true;
}
......
......@@ -501,12 +501,6 @@ const TriggerEvent::Value triggerEventValues[] = {
TriggerEvent::TE_DELETE
};
const char* triggerNameFormat[] = {
"NDB$BACKUP_%d_%d_INSERT",
"NDB$BACKUP_%d_%d_UPDATE",
"NDB$BACKUP_%d_%d_DELETE"
};
const Backup::State
Backup::validSlaveTransitions[] = {
INITIAL, DEFINING,
......@@ -776,7 +770,6 @@ Backup::checkNodeFail(Signal* signal,
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail;
ref->signalNo = ptr.p->masterData.startBackup.signalNo;
gsn= GSN_START_BACKUP_REF;
len= StartBackupRef::SignalLength;
pos= &ref->nodeId - signal->getDataPtr();
......@@ -928,9 +921,7 @@ Backup::execBACKUP_REQ(Signal* signal)
ptr.p->backupKey[1] = 0;
ptr.p->backupDataLen = 0;
ptr.p->masterData.errorCode = 0;
ptr.p->masterData.dropTrig.tableId = RNIL;
ptr.p->masterData.alterTrig.tableId = RNIL;
UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend();
ptr.p->masterData.gsn = GSN_UTIL_SEQUENCE_REQ;
......@@ -1241,13 +1232,18 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
signal->theData[2] = ptr.p->backupId;
ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3);
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB);
/**
* Prepare Trig
* We've received GSN_DEFINE_BACKUP_CONF from all participants.
*
* Our next step is to send START_BACKUP_REQ to all participants,
* who will then send CREATE_TRIG_REQ for all tables to their local
* DBTUP.
*/
TablePtr tabPtr;
ndbrequire(ptr.p->tables.first(tabPtr));
sendCreateTrig(signal, ptr, tabPtr);
ptr.p->tables.first(tabPtr);
sendStartBackup(signal, ptr, tabPtr);
}
/*****************************************************************************
......@@ -1275,43 +1271,72 @@ Backup::sendCreateTrig(Signal* signal,
BackupRecordPtr ptr, TablePtr tabPtr)
{
CreateTrigReq * req =(CreateTrigReq *)signal->getDataPtrSend();
ptr.p->masterData.gsn = GSN_CREATE_TRIG_REQ;
ptr.p->masterData.sendCounter = 3;
ptr.p->masterData.createTrig.tableId = tabPtr.p->tableId;
/*
* First, setup the structures
*/
for(Uint32 j=0; j<3; j++) {
jam();
TriggerPtr trigPtr;
if(!ptr.p->triggers.seize(trigPtr)) {
jam();
ptr.p->m_gsn = GSN_START_BACKUP_REF;
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend();
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord;
ref->nodeId = getOwnNodeId();
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal,
StartBackupRef::SignalLength, JBB);
return;
} // if
const Uint32 triggerId= trigPtr.i;
tabPtr.p->triggerIds[j] = triggerId;
tabPtr.p->triggerAllocated[j] = true;
trigPtr.p->backupPtr = ptr.i;
trigPtr.p->tableId = tabPtr.p->tableId;
trigPtr.p->tab_ptr_i = tabPtr.i;
trigPtr.p->logEntry = 0;
trigPtr.p->event = j;
trigPtr.p->maxRecordSize = 2048;
trigPtr.p->operation =
&ptr.p->files.getPtr(ptr.p->logFilePtr)->operation;
trigPtr.p->operation->noOfBytes = 0;
trigPtr.p->operation->noOfRecords = 0;
trigPtr.p->errorCode = 0;
} // for
/*
* now ask DBTUP to create
*/
ptr.p->slaveData.gsn = GSN_CREATE_TRIG_REQ;
ptr.p->slaveData.trigSendCounter = 3;
ptr.p->slaveData.createTrig.tableId = tabPtr.p->tableId;
req->setUserRef(reference());
req->setReceiverRef(reference());
req->setConnectionPtr(ptr.i);
req->setRequestType(CreateTrigReq::RT_USER);
Bitmask<MAXNROFATTRIBUTESINWORDS> attrMask;
createAttributeMask(tabPtr, attrMask);
req->setAttributeMask(attrMask);
req->setTableId(tabPtr.p->tableId);
req->setIndexId(RNIL); // not used
req->setTriggerId(RNIL); // to be created
req->setTriggerType(TriggerType::SUBSCRIPTION);
req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
req->setMonitorReplicas(true);
req->setMonitorAllAttributes(false);
req->setOnline(false); // leave trigger offline
req->setOnline(true);
char triggerName[MAX_TAB_NAME_SIZE];
Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2);
LinearSectionPtr lsPtr[3];
for (int i=0; i < 3; i++) {
req->setTriggerId(tabPtr.p->triggerIds[i]);
req->setTriggerEvent(triggerEventValues[i]);
req->setReportAllMonitoredAttributes(false);
BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
ptr.p->backupId, tabPtr.p->tableId);
w.reset();
w.add(CreateTrigReq::TriggerNameKey, triggerName);
lsPtr[0].p = nameBuffer;
lsPtr[0].sz = w.getWordsUsed();
sendSignal(DBDICT_REF, GSN_CREATE_TRIG_REQ,
signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1);
sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ,
signal, CreateTrigReq::SignalLength, JBB);
}
}
......@@ -1331,25 +1356,25 @@ Backup::execCREATE_TRIG_CONF(Signal* signal)
/**
* Verify that I'm waiting for this conf
*
* ptr.p->masterRef != reference()
* as slaves and masters have triggers now.
*/
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ndbrequire(ptr.p->masterData.createTrig.tableId == tableId);
ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false);
ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId);
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, tableId));
ndbrequire(type < 3); // in case someone changes the enums
ndbrequire(tabPtr.p->triggerIds[type] == ILLEGAL_TRIGGER_ID);
tabPtr.p->triggerIds[type] = triggerId;
createTrigReply(signal, ptr);
}
void
Backup::execCREATE_TRIG_REF(Signal* signal)
{
jamEntry();
CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtr();
const Uint32 ptrI = ref->getConnectionPtr();
......@@ -1360,14 +1385,16 @@ Backup::execCREATE_TRIG_REF(Signal* signal)
/**
* Verify that I'm waiting for this ref
*
* ptr.p->masterRef != reference()
* as slaves and masters have triggers now
*/
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ndbrequire(ptr.p->masterData.createTrig.tableId == tableId);
ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ);
ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false);
ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId);
ptr.p->setErrorCode(ref->getErrorCode());
createTrigReply(signal, ptr);
}
......@@ -1379,26 +1406,33 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr)
/**
* Check finished with table
*/
ptr.p->masterData.sendCounter--;
if(ptr.p->masterData.sendCounter.done() == false){
ptr.p->slaveData.trigSendCounter--;
if(ptr.p->slaveData.trigSendCounter.done() == false){
jam();
return;
}//if
if (ERROR_INSERTED(10025))
if (ERROR_INSERTED(10025))
{
ptr.p->errorCode = 325;
}
if(ptr.p->checkError()) {
jam();
masterAbort(signal, ptr);
ptr.p->m_gsn = GSN_START_BACKUP_REF;
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend();
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->errorCode = ptr.p->errorCode;
ref->nodeId = getOwnNodeId();
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal,
StartBackupRef::SignalLength, JBB);
return;
}//if
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.createTrig.tableId));
ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.createTrig.tableId));
/**
* Next table
*/
......@@ -1410,14 +1444,16 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr)
}//if
/**
* Finished with all tables, send StartBackupReq
* We've finished creating triggers.
*
* send conf and wait
*/
ptr.p->tables.first(tabPtr);
ptr.p->masterData.startBackup.signalNo = 0;
ptr.p->masterData.startBackup.noOfSignals =
(ptr.p->tables.noOfElements() + StartBackupReq::MaxTableTriggers - 1) /
StartBackupReq::MaxTableTriggers;
sendStartBackup(signal, ptr, tabPtr);
ptr.p->m_gsn = GSN_START_BACKUP_CONF;
StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend();
conf->backupPtr = ptr.i;
conf->backupId = ptr.p->backupId;
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal,
StartBackupConf::SignalLength, JBB);
}
/*****************************************************************************
......@@ -1430,33 +1466,23 @@ Backup::sendStartBackup(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr)
{
ptr.p->masterData.startBackup.tablePtr = tabPtr.i;
StartBackupReq* req = (StartBackupReq*)signal->getDataPtrSend();
req->backupId = ptr.p->backupId;
req->backupPtr = ptr.i;
req->signalNo = ptr.p->masterData.startBackup.signalNo;
req->noOfSignals = ptr.p->masterData.startBackup.noOfSignals;
Uint32 i;
for(i = 0; i<StartBackupReq::MaxTableTriggers; i++) {
jam();
req->tableTriggers[i].tableId = tabPtr.p->tableId;
req->tableTriggers[i].triggerIds[0] = tabPtr.p->triggerIds[0];
req->tableTriggers[i].triggerIds[1] = tabPtr.p->triggerIds[1];
req->tableTriggers[i].triggerIds[2] = tabPtr.p->triggerIds[2];
if(!ptr.p->tables.next(tabPtr)){
jam();
i++;
break;
}//if
}//for
req->noOfTableTriggers = i;
/**
* We use trigger Ids that are unique to BACKUP.
* These don't interfere with other triggers (e.g. from DBDICT)
* as there is a special case in DBTUP.
*
* Consequently, backups during online upgrade won't work
*/
ptr.p->masterData.gsn = GSN_START_BACKUP_REQ;
ptr.p->masterData.sendCounter = ptr.p->nodes;
NodeReceiverGroup rg(BACKUP, ptr.p->nodes);
sendSignal(rg, GSN_START_BACKUP_REQ, signal,
StartBackupReq::HeaderLength +
(i * StartBackupReq::TableTriggerLength), JBB);
sendSignal(rg, GSN_START_BACKUP_REQ, signal,
StartBackupReq::SignalLength, JBB);
}
void
......@@ -1467,14 +1493,13 @@ Backup::execSTART_BACKUP_REF(Signal* signal)
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtr();
const Uint32 ptrI = ref->backupPtr;
//const Uint32 backupId = ref->backupId;
const Uint32 signalNo = ref->signalNo;
const Uint32 nodeId = ref->nodeId;
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
ptr.p->setErrorCode(ref->errorCode);
startBackupReply(signal, ptr, nodeId, signalNo);
startBackupReply(signal, ptr, nodeId);
}
void
......@@ -1485,23 +1510,20 @@ Backup::execSTART_BACKUP_CONF(Signal* signal)
StartBackupConf* conf = (StartBackupConf*)signal->getDataPtr();
const Uint32 ptrI = conf->backupPtr;
//const Uint32 backupId = conf->backupId;
const Uint32 signalNo = conf->signalNo;
const Uint32 nodeId = refToNode(signal->senderBlockRef());
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
startBackupReply(signal, ptr, nodeId, signalNo);
startBackupReply(signal, ptr, nodeId);
}
void
Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr,
Uint32 nodeId, Uint32 signalNo)
Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
{
CRASH_INSERTION((10004));
ndbrequire(ptr.p->masterData.startBackup.signalNo == signalNo);
if (!haveAllSignals(ptr, GSN_START_BACKUP_REQ, nodeId)) {
jam();
return;
......@@ -1518,148 +1540,20 @@ Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr,
return;
}
TablePtr tabPtr;
c_tablePool.getPtr(tabPtr, ptr.p->masterData.startBackup.tablePtr);
for(Uint32 i = 0; i<StartBackupReq::MaxTableTriggers; i++) {
jam();
if(!ptr.p->tables.next(tabPtr)) {
jam();
break;
}//if
}//for
if(tabPtr.i != RNIL) {
jam();
ptr.p->masterData.startBackup.signalNo++;
sendStartBackup(signal, ptr, tabPtr);
return;
}
sendAlterTrig(signal, ptr);
}
/*****************************************************************************
*
* Master functionality - Activate triggers
*
*****************************************************************************/
void
Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr)
{
AlterTrigReq * req =(AlterTrigReq *)signal->getDataPtrSend();
ptr.p->masterData.gsn = GSN_ALTER_TRIG_REQ;
ptr.p->masterData.sendCounter = 0;
req->setUserRef(reference());
req->setConnectionPtr(ptr.i);
req->setRequestType(AlterTrigReq::RT_USER);
req->setTriggerInfo(0); // not used on ALTER via DICT
req->setOnline(true);
req->setReceiverRef(reference());
TablePtr tabPtr;
if (ptr.p->masterData.alterTrig.tableId == RNIL) {
jam();
ptr.p->tables.first(tabPtr);
} else {
jam();
ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.alterTrig.tableId));
ptr.p->tables.next(tabPtr);
}//if
if (tabPtr.i != RNIL) {
jam();
ptr.p->masterData.alterTrig.tableId = tabPtr.p->tableId;
req->setTableId(tabPtr.p->tableId);
req->setTriggerId(tabPtr.p->triggerIds[0]);
sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
signal, AlterTrigReq::SignalLength, JBB);
req->setTriggerId(tabPtr.p->triggerIds[1]);
sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
signal, AlterTrigReq::SignalLength, JBB);
req->setTriggerId(tabPtr.p->triggerIds[2]);
sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ,
signal, AlterTrigReq::SignalLength, JBB);
ptr.p->masterData.sendCounter += 3;
return;
}//if
ptr.p->masterData.alterTrig.tableId = RNIL;
/**
* Finished with all tables
* Wait for GCP
*/
ptr.p->masterData.gsn = GSN_WAIT_GCP_REQ;
ptr.p->masterData.waitGCP.startBackup = true;
WaitGCPReq * waitGCPReq = (WaitGCPReq*)signal->getDataPtrSend();
waitGCPReq->senderRef = reference();
waitGCPReq->senderData = ptr.i;
waitGCPReq->requestType = WaitGCPReq::CompleteForceStart;
sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal,
sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal,
WaitGCPReq::SignalLength,JBB);
}
void
Backup::execALTER_TRIG_CONF(Signal* signal)
{
jamEntry();
AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtr();
const Uint32 ptrI = conf->getConnectionPtr();
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
alterTrigReply(signal, ptr);
}
void
Backup::execALTER_TRIG_REF(Signal* signal)
{
jamEntry();
AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtr();
const Uint32 ptrI = ref->getConnectionPtr();
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
ptr.p->setErrorCode(ref->getErrorCode());
alterTrigReply(signal, ptr);
}
void
Backup::alterTrigReply(Signal* signal, BackupRecordPtr ptr)
{
CRASH_INSERTION((10005));
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_ALTER_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ptr.p->masterData.sendCounter--;
if(ptr.p->masterData.sendCounter.done() == false){
jam();
return;
}//if
if(ptr.p->checkError()){
jam();
masterAbort(signal, ptr);
return;
}//if
sendAlterTrig(signal, ptr);
}
void
Backup::execWAIT_GCP_REF(Signal* signal)
{
......@@ -1720,7 +1614,12 @@ Backup::execWAIT_GCP_CONF(Signal* signal){
{
CRASH_INSERTION((10009));
ptr.p->stopGCP = gcp;
sendDropTrig(signal, ptr); // regular dropping of triggers
/**
* Backup is complete - begin cleanup
* STOP_BACKUP_REQ is sent to participants.
* They then drop the local triggers
*/
sendStopBackup(signal, ptr);
return;
}//if
......@@ -1927,8 +1826,8 @@ Backup::execBACKUP_FRAGMENT_REF(Signal* signal)
}
/*****************************************************************************
*
* Master functionality - Drop triggers
*
* Slave functionality - Drop triggers
*
*****************************************************************************/
......@@ -1936,23 +1835,63 @@ void
Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr)
{
TablePtr tabPtr;
if (ptr.p->masterData.dropTrig.tableId == RNIL) {
ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ;
if (ptr.p->slaveData.dropTrig.tableId == RNIL) {
jam();
ptr.p->tables.first(tabPtr);
} else {
jam();
ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.dropTrig.tableId));
ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.dropTrig.tableId));
ptr.p->tables.next(tabPtr);
}//if
if (tabPtr.i != RNIL) {
jam();
sendDropTrig(signal, ptr, tabPtr);
} else {
jam();
ptr.p->masterData.dropTrig.tableId = RNIL;
/**
* Insert footers
*/
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1));
* dst = 0;
filePtr.p->operation.dataBuffer.updateWritePtr(1);
}
sendStopBackup(signal, ptr);
}//if
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2;
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz));
BackupFormat::CtlFile::GCPEntry * gcp =
(BackupFormat::CtlFile::GCPEntry*)dst;
gcp->SectionType = htonl(BackupFormat::GCP_ENTRY);
gcp->SectionLength = htonl(gcpSz);
gcp->StartGCP = htonl(ptr.p->startGCP);
gcp->StopGCP = htonl(ptr.p->stopGCP - 1);
filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
}
{ // UNLOCK while dropping trigger for better timeslicing
TablePtr tabPtr;
for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
ptr.p->tables.next(tabPtr))
{
signal->theData[0] = tabPtr.p->tableId;
signal->theData[1] = 0; // unlock
EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
}
}
closeFiles(signal, ptr);
}
}
void
......@@ -1961,40 +1900,26 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr)
jam();
DropTrigReq * req = (DropTrigReq *)signal->getDataPtrSend();
ptr.p->masterData.gsn = GSN_DROP_TRIG_REQ;
ptr.p->masterData.sendCounter = 0;
ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ;
ptr.p->slaveData.trigSendCounter = 0;
req->setConnectionPtr(ptr.i);
req->setUserRef(reference()); // Sending to myself
req->setRequestType(DropTrigReq::RT_USER);
req->setIndexId(RNIL);
req->setTriggerInfo(0); // not used on DROP via DICT
req->setTriggerInfo(0); // not used on DROP
req->setTriggerType(TriggerType::SUBSCRIPTION);
req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
char triggerName[MAX_TAB_NAME_SIZE];
Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string
LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2);
LinearSectionPtr lsPtr[3];
ptr.p->masterData.dropTrig.tableId = tabPtr.p->tableId;
ptr.p->slaveData.dropTrig.tableId = tabPtr.p->tableId;
req->setTableId(tabPtr.p->tableId);
for (int i = 0; i < 3; i++) {
Uint32 id = tabPtr.p->triggerIds[i];
req->setTriggerId(id);
if (id != ILLEGAL_TRIGGER_ID) {
sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ,
signal, DropTrigReq::SignalLength, JBB);
} else {
BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
ptr.p->backupId, tabPtr.p->tableId);
w.reset();
w.add(CreateTrigReq::TriggerNameKey, triggerName);
lsPtr[0].p = nameBuffer;
lsPtr[0].sz = w.getWordsUsed();
sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ,
signal, DropTrigReq::SignalLength, JBB, lsPtr, 1);
}
ptr.p->masterData.sendCounter ++;
req->setTriggerEvent(triggerEventValues[i]);
sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ,
signal, DropTrigReq::SignalLength, JBB);
ptr.p->slaveData.trigSendCounter ++;
}
}
......@@ -2005,11 +1930,13 @@ Backup::execDROP_TRIG_REF(Signal* signal)
DropTrigRef* ref = (DropTrigRef*)signal->getDataPtr();
const Uint32 ptrI = ref->getConnectionPtr();
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
//ndbrequire(ref->getErrorCode() == DropTrigRef::NoSuchTrigger);
ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId();
ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl;
dropTrigReply(signal, ptr);
}
......@@ -2020,29 +1947,29 @@ Backup::execDROP_TRIG_CONF(Signal* signal)
DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr();
const Uint32 ptrI = conf->getConnectionPtr();
const Uint32 triggerId= conf->getTriggerId();
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
dropTrigReply(signal, ptr);
}
void
Backup::dropTrigReply(Signal* signal, BackupRecordPtr ptr)
{
CRASH_INSERTION((10012));
ndbrequire(ptr.p->masterRef == reference());
ndbrequire(ptr.p->masterData.gsn == GSN_DROP_TRIG_REQ);
ndbrequire(ptr.p->masterData.sendCounter.done() == false);
ptr.p->masterData.sendCounter--;
if(ptr.p->masterData.sendCounter.done() == false){
ndbrequire(ptr.p->slaveData.gsn == GSN_DROP_TRIG_REQ);
ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false);
// move from .masterData to .slaveData
ptr.p->slaveData.trigSendCounter--;
if(ptr.p->slaveData.trigSendCounter.done() == false){
jam();
return;
}//if
sendDropTrig(signal, ptr); // recursive next
}
......@@ -2165,6 +2092,9 @@ Backup::masterAbort(Signal* signal, BackupRecordPtr ptr)
#ifdef DEBUG_ABORT
ndbout_c("************ masterAbort");
#endif
ndbassert(ptr.p->masterRef == reference());
if(ptr.p->masterData.errorCode != 0)
{
jam();
......@@ -2208,13 +2138,13 @@ Backup::masterAbort(Signal* signal, BackupRecordPtr ptr)
case GSN_BACKUP_FRAGMENT_REQ:
jam();
ptr.p->stopGCP= ptr.p->startGCP + 1;
sendDropTrig(signal, ptr); // dropping due to error
sendStopBackup(signal, ptr); // dropping due to error
return;
case GSN_UTIL_SEQUENCE_REQ:
case GSN_UTIL_LOCK_REQ:
case GSN_DROP_TRIG_REQ:
ndbrequire(false);
return;
case GSN_DROP_TRIG_REQ:
case GSN_STOP_BACKUP_REQ:
return;
}
......@@ -2329,6 +2259,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal)
ptr.p->m_gsn = GSN_DEFINE_BACKUP_REQ;
ptr.p->slaveState.forceState(INITIAL);
ptr.p->slaveState.setState(DEFINING);
ptr.p->slaveData.dropTrig.tableId = RNIL;
ptr.p->errorCode = 0;
ptr.p->clientRef = req->clientRef;
ptr.p->clientData = req->clientData;
......@@ -2345,14 +2276,14 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal)
ptr.p->backupKey[0] = req->backupKey[0];
ptr.p->backupKey[1] = req->backupKey[1];
ptr.p->backupDataLen = req->backupDataLen;
ptr.p->masterData.dropTrig.tableId = RNIL;
ptr.p->masterData.alterTrig.tableId = RNIL;
ptr.p->masterData.errorCode = 0;
ptr.p->noOfBytes = 0;
ptr.p->noOfRecords = 0;
ptr.p->noOfLogBytes = 0;
ptr.p->noOfLogRecords = 0;
ptr.p->currGCP = 0;
ptr.p->startGCP = 0;
ptr.p->stopGCP = 0;
/**
* Allocate files
......@@ -3261,63 +3192,22 @@ Backup::execSTART_BACKUP_REQ(Signal* signal)
jamEntry();
CRASH_INSERTION((10015));
StartBackupReq* req = (StartBackupReq*)signal->getDataPtr();
const Uint32 ptrI = req->backupPtr;
//const Uint32 backupId = req->backupId;
const Uint32 signalNo = req->signalNo;
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
ptr.p->slaveState.setState(STARTED);
ptr.p->m_gsn = GSN_START_BACKUP_REQ;
for(Uint32 i = 0; i<req->noOfTableTriggers; i++) {
jam();
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, req->tableTriggers[i].tableId));
for(Uint32 j = 0; j<3; j++) {
jam();
const Uint32 triggerId = req->tableTriggers[i].triggerIds[j];
tabPtr.p->triggerIds[j] = triggerId;
TriggerPtr trigPtr;
if(!ptr.p->triggers.seizeId(trigPtr, triggerId)) {
jam();
ptr.p->m_gsn = GSN_START_BACKUP_REF;
StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend();
ref->backupPtr = ptr.i;
ref->backupId = ptr.p->backupId;
ref->signalNo = signalNo;
ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord;
ref->nodeId = getOwnNodeId();
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal,
StartBackupRef::SignalLength, JBB);
return;
}//if
tabPtr.p->triggerAllocated[j] = true;
trigPtr.p->backupPtr = ptr.i;
trigPtr.p->tableId = tabPtr.p->tableId;
trigPtr.p->tab_ptr_i = tabPtr.i;
trigPtr.p->logEntry = 0;
trigPtr.p->event = j;
trigPtr.p->maxRecordSize = 2048;
trigPtr.p->operation =
&ptr.p->files.getPtr(ptr.p->logFilePtr)->operation;
trigPtr.p->operation->noOfBytes = 0;
trigPtr.p->operation->noOfRecords = 0;
trigPtr.p->errorCode = 0;
}//for
}//for
/**
* Start file threads...
*/
BackupFilePtr filePtr;
for(ptr.p->files.first(filePtr);
filePtr.i!=RNIL;
for(ptr.p->files.first(filePtr);
filePtr.i!=RNIL;
ptr.p->files.next(filePtr)){
jam();
if(filePtr.p->fileRunning == 0) {
......@@ -3328,14 +3218,13 @@ Backup::execSTART_BACKUP_REQ(Signal* signal)
sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 2);
}//if
}//for
ptr.p->m_gsn = GSN_START_BACKUP_CONF;
StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend();
conf->backupPtr = ptr.i;
conf->backupId = ptr.p->backupId;
conf->signalNo = signalNo;
sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal,
StartBackupConf::SignalLength, JBB);
/**
* Tell DBTUP to create triggers
*/
TablePtr tabPtr;
ndbrequire(ptr.p->tables.first(tabPtr));
sendCreateTrig(signal, ptr, tabPtr);
}
/*****************************************************************************
......@@ -3887,7 +3776,7 @@ void
Backup::execFSAPPENDCONF(Signal* signal)
{
jamEntry();
CRASH_INSERTION((10018));
//FsConf * conf = (FsConf*)signal->getDataPtr();
......@@ -3990,10 +3879,13 @@ Backup::execBACKUP_TRIG_REQ(Signal* signal)
Uint32 result;
jamEntry();
c_triggerPool.getPtr(trigPtr, trigger_id);
c_tablePool.getPtr(tabPtr, trigPtr.p->tab_ptr_i);
tabPtr.p->fragments.getPtr(fragPtr, frag_id);
if (fragPtr.p->node != getOwnNodeId()) {
jam();
result = ZFALSE;
} else {
......@@ -4014,12 +3906,12 @@ Backup::execTRIG_ATTRINFO(Signal* signal) {
TriggerPtr trigPtr;
c_triggerPool.getPtr(trigPtr, trg->getTriggerId());
ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); // Online...
if(trigPtr.p->errorCode != 0) {
jam();
return;
}//if
if(trg->getAttrInfoType() == TrigAttrInfo::BEFORE_VALUES) {
jam();
/**
......@@ -4056,18 +3948,29 @@ Backup::execTRIG_ATTRINFO(Signal* signal) {
memcpy(signal->getDataPtrSend(), save, 4*TrigAttrInfo::StaticLength);
return;
}//if
logEntry = (BackupFormat::LogFile::LogEntry *)dst;
trigPtr.p->logEntry = logEntry;
logEntry->Length = 0;
logEntry->TableId = htonl(trigPtr.p->tableId);
logEntry->TriggerEvent = htonl(trigPtr.p->event);
if(trigPtr.p->event==0)
logEntry->TriggerEvent= htonl(TriggerEvent::TE_INSERT);
else if(trigPtr.p->event==1)
logEntry->TriggerEvent= htonl(TriggerEvent::TE_UPDATE);
else if(trigPtr.p->event==2)
logEntry->TriggerEvent= htonl(TriggerEvent::TE_DELETE);
else {
ndbout << "Bad Event: " << trigPtr.p->event << endl;
ndbrequire(false);
}
} else {
ndbrequire(logEntry->TableId == htonl(trigPtr.p->tableId));
ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event));
// ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event));
}//if
const Uint32 pos = logEntry->Length;
const Uint32 pos = logEntry->Length;
const Uint32 dataLen = signal->length() - TrigAttrInfo::StaticLength;
memcpy(&logEntry->Data[pos], trg->getData(), dataLen << 2);
......@@ -4100,16 +4003,15 @@ Backup::execFIRE_TRIG_ORD(Signal* signal)
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, trigPtr.p->backupPtr);
if(gci != ptr.p->currGCP)
if(gci != ptr.p->currGCP)
{
jam();
trigPtr.p->logEntry->TriggerEvent = htonl(trigPtr.p->event | 0x10000);
trigPtr.p->logEntry->TriggerEvent|= htonl(0x10000);
trigPtr.p->logEntry->Data[len] = htonl(gci);
len++;
ptr.p->currGCP = gci;
}//if
}
len += (sizeof(BackupFormat::LogFile::LogEntry) >> 2) - 2;
trigPtr.p->logEntry->Length = htonl(len);
......@@ -4165,7 +4067,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal)
* At least one GCP must have passed
*/
ndbrequire(stopGCP > startGCP);
/**
* Get backup record
*/
......@@ -4174,50 +4076,13 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal)
ptr.p->slaveState.setState(STOPPING);
ptr.p->m_gsn = GSN_STOP_BACKUP_REQ;
ptr.p->startGCP= startGCP;
ptr.p->stopGCP= stopGCP;
/**
* Insert footers
* Destroy the triggers in local DBTUP we created
*/
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr);
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1));
* dst = 0;
filePtr.p->operation.dataBuffer.updateWritePtr(1);
}
{
BackupFilePtr filePtr;
ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2;
Uint32 * dst;
ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz));
BackupFormat::CtlFile::GCPEntry * gcp =
(BackupFormat::CtlFile::GCPEntry*)dst;
gcp->SectionType = htonl(BackupFormat::GCP_ENTRY);
gcp->SectionLength = htonl(gcpSz);
gcp->StartGCP = htonl(startGCP);
gcp->StopGCP = htonl(stopGCP - 1);
filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
}
{
TablePtr tabPtr;
for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
ptr.p->tables.next(tabPtr))
{
signal->theData[0] = tabPtr.p->tableId;
signal->theData[1] = 0; // unlock
EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
}
}
closeFiles(signal, ptr);
sendDropTrig(signal, ptr);
}
void
......@@ -4483,19 +4348,10 @@ Backup::execABORT_BACKUP_ORD(Signal* signal)
ptr.p->masterRef = reference();
ptr.p->nodes.clear();
ptr.p->nodes.set(getOwnNodeId());
if(ref == reference())
{
ptr.p->stopGCP= ptr.p->startGCP + 1;
sendDropTrig(signal, ptr);
}
else
{
ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ;
ptr.p->masterData.sendCounter.clearWaitingFor();
ptr.p->masterData.sendCounter.setWaitingFor(getOwnNodeId());
closeFiles(signal, ptr);
}
ptr.p->stopGCP= ptr.p->startGCP + 1;
sendStopBackup(signal, ptr);
}
......
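After this restructuring each participant answers START_BACKUP_REQ only once all three of its CREATE_TRIG requests (insert/update/delete) have been acknowledged. A linearized sketch of that slave-side counting logic, with illustrative types in place of BackupRecord/SignalCounter:

```cpp
// Illustrative reduction of Backup::createTrigReply: count down the three
// CREATE_TRIG replies, then answer the master with REF or CONF.
#include <cstdio>

struct SlaveData {
  int trigSendCounter;  // starts at 3, one per trigger event
  int errorCode;        // 0 == ok
};

const char *create_trig_reply(SlaveData &s, int errorCode) {
  if (errorCode) s.errorCode = errorCode;
  if (--s.trigSendCounter > 0)
    return "wait";  // more CREATE_TRIG_{CONF,REF} still outstanding
  return s.errorCode ? "START_BACKUP_REF" : "START_BACKUP_CONF";
}

int main() {
  SlaveData s = {3, 0};
  create_trig_reply(s, 0);
  create_trig_reply(s, 0);
  std::puts(create_trig_reply(s, 0));  // prints START_BACKUP_CONF
}
```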
......@@ -96,8 +96,6 @@ protected:
void execGET_TABINFO_CONF(Signal* signal);
void execCREATE_TRIG_REF(Signal* signal);
void execCREATE_TRIG_CONF(Signal* signal);
void execALTER_TRIG_REF(Signal* signal);
void execALTER_TRIG_CONF(Signal* signal);
void execDROP_TRIG_REF(Signal* signal);
void execDROP_TRIG_CONF(Signal* signal);
......@@ -426,6 +424,7 @@ public:
Uint32 clientRef;
Uint32 clientData;
Uint32 flags;
Uint32 signalNo;
Uint32 backupId;
Uint32 backupKey[2];
Uint32 masterRef;
......@@ -451,7 +450,18 @@ public:
Uint32 backupDataLen; // Used for (un)packing backup request
Array<Page32> pages; // Used for (un)packing backup request
SimpleProperties props;// Used for (un)packing backup request
struct SlaveData {
SignalCounter trigSendCounter;
Uint32 gsn;
struct {
Uint32 tableId;
} createTrig;
struct {
Uint32 tableId;
} dropTrig;
} slaveData;
struct MasterData {
MasterData(Backup & b)
{
......@@ -462,15 +472,6 @@ public:
Uint32 gsn;
SignalCounter sendCounter;
Uint32 errorCode;
struct {
Uint32 tableId;
} createTrig;
struct {
Uint32 tableId;
} dropTrig;
struct {
Uint32 tableId;
} alterTrig;
union {
struct {
Uint32 startBackup;
......@@ -563,7 +564,7 @@ public:
void defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
void createTrigReply(Signal* signal, BackupRecordPtr ptr);
void alterTrigReply(Signal* signal, BackupRecordPtr ptr);
void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32, Uint32);
void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32);
void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId);
void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0);
......
......@@ -25,15 +25,12 @@ BACKUP_REQ
<-------------------------------
BACKUP_CONF
<----------------
CREATE_TRIG
--------------> (If master crashes here -> rogue triggers/memory leak)
<--------------
START_BACKUP
------------------------------>
CREATE_TRIG
-------------->
<--------------
<------------------------------
ALTER_TRIG
-------------->
<--------------
WAIT_GCP
-------------->
<--------------
......@@ -46,11 +43,11 @@ BACKUP_CONF
WAIT_GCP
-------------->
<--------------
DROP_TRIG
-------------->
<--------------
STOP_BACKUP
------------------------------>
DROP_TRIG
-------------->
<--------------
<------------------------------
BACKUP_COMPLETE_REP
<----------------
......
......@@ -62,9 +62,6 @@ Backup::Backup(const Configuration & conf) :
addRecSignal(GSN_CREATE_TRIG_REF, &Backup::execCREATE_TRIG_REF);
addRecSignal(GSN_CREATE_TRIG_CONF, &Backup::execCREATE_TRIG_CONF);
addRecSignal(GSN_ALTER_TRIG_REF, &Backup::execALTER_TRIG_REF);
addRecSignal(GSN_ALTER_TRIG_CONF, &Backup::execALTER_TRIG_CONF);
addRecSignal(GSN_DROP_TRIG_REF, &Backup::execDROP_TRIG_REF);
addRecSignal(GSN_DROP_TRIG_CONF, &Backup::execDROP_TRIG_CONF);
......
......@@ -777,6 +777,10 @@ struct TupTriggerData {
/**
* Trigger id, used by DICT/TRIX to identify the trigger
*
* trigger Ids are unique per block for SUBSCRIPTION triggers.
* This is so that BACKUP can use TUP triggers directly and delete them
* properly.
*/
Uint32 triggerId;
......@@ -2012,7 +2016,9 @@ private:
bool createTrigger(Tablerec* table, const CreateTrigReq* req);
Uint32 dropTrigger(Tablerec* table, const DropTrigReq* req);
Uint32 dropTrigger(Tablerec* table,
const DropTrigReq* req,
BlockNumber sender);
void
checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct,
......
......@@ -186,7 +186,7 @@ Dbtup::execDROP_TRIG_REQ(Signal* signal)
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
// Drop trigger
Uint32 r = dropTrigger(tabPtr.p, req);
Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(senderRef));
if (r == 0){
// Send conf
DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
......@@ -318,7 +318,7 @@ Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId)
/* */
/* ---------------------------------------------------------------- */
Uint32
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender)
{
if (ERROR_INSERTED(4004)) {
CLEAR_ERROR_INSERT_VALUE;
......@@ -330,7 +330,7 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
TriggerActionTime::Value ttime = req->getTriggerActionTime();
TriggerEvent::Value tevent = req->getTriggerEvent();
// ndbout_c("Drop TupTrigger %u = %u %u %u %u", triggerId, table, ttype, ttime, tevent);
// ndbout_c("Drop TupTrigger %u = %u %u %u %u by %u", triggerId, table, ttype, ttime, tevent, sender);
ArrayList<TupTriggerData>* tlist = findTriggerList(table, ttype, ttime, tevent);
ndbrequire(tlist != NULL);
......@@ -339,6 +339,19 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req)
for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) {
ljam();
if (ptr.p->triggerId == triggerId) {
if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock)
{
/**
* You can only drop your own triggers for subscription triggers.
* Trigger IDs are private for each block.
*
* SUMA encodes information in the triggerId
*
* Backup doesn't really care about the Ids though.
*/
ljam();
continue;
}
ljam();
tlist->release(ptr.i);
return 0;
......
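Because subscription trigger ids are now per-block namespaces, Dbtup::dropTrigger must match on the sender as well as the id. A minimal sketch of that ownership check over a trigger list; the containers and field names are illustrative, not DBTUP's ArrayList structures.

```cpp
// Sketch of the new drop rule: for SUBSCRIPTION triggers a block may only
// drop triggers it created itself, so an id match alone is not sufficient.
#include <vector>

struct TrigData {
  unsigned triggerId;
  unsigned receiverBlock;  // block that created/owns the trigger
};

// Returns true if a trigger owned by `sender` with `triggerId` was dropped.
bool drop_subscription_trigger(std::vector<TrigData> &list,
                               unsigned triggerId, unsigned sender) {
  for (auto it = list.begin(); it != list.end(); ++it) {
    if (it->triggerId != triggerId)
      continue;
    if (it->receiverBlock != sender)
      continue;  // same id in another block's namespace: keep scanning
    list.erase(it);
    return true;
  }
  return false;  // nothing owned by this sender matched
}

int main() {
  std::vector<TrigData> list = {{7, /*e.g. SUMA*/ 1}, {7, /*e.g. BACKUP*/ 2}};
  bool ok = drop_subscription_trigger(list, 7, 2);
  return (ok && list.size() == 1 && list[0].receiverBlock == 1) ? 0 : 1;
}
```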
......@@ -193,7 +193,7 @@ runDDL(NDBT_Context* ctx, NDBT_Step* step){
}
int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){
int runDropTablesRestart(NDBT_Context* ctx, NDBT_Step* step){
NdbRestarter restarter;
Ndb* pNdb = GETNDB(step);
......@@ -201,7 +201,7 @@ int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table *tab = ctx->getTab();
pNdb->getDictionary()->dropTable(tab->getName());
if (restarter.restartAll(true) != 0)
if (restarter.restartAll(false) != 0)
return NDBT_FAILED;
if (restarter.waitClusterStarted() != 0)
......@@ -406,6 +406,7 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
// TEMPORARY FIX
// To erase all tables from cache(s)
// To be removed, maybe replaced by ndb.invalidate();
runDropTable(ctx,step);
{
Bank bank(ctx->m_cluster_connection);
......@@ -416,8 +417,8 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
}
// END TEMPORARY FIX
ndbout << "Performing initial restart" << endl;
if (restarter.restartAll(true) != 0)
ndbout << "Performing restart" << endl;
if (restarter.restartAll(false) != 0)
return NDBT_FAILED;
if (restarter.waitClusterStarted() != 0)
......@@ -465,12 +466,12 @@ TESTCASE("BackupOne",
"Test that backup and restore works on one table \n"
"1. Load table\n"
"2. Backup\n"
"3. Restart -i\n"
"3. Drop tables and restart \n"
"4. Restore\n"
"5. Verify count and content of table\n"){
INITIALIZER(runLoadTable);
INITIALIZER(runBackupOne);
INITIALIZER(runRestartInitial);
INITIALIZER(runDropTablesRestart);
INITIALIZER(runRestoreOne);
VERIFIER(runVerifyOne);
FINALIZER(runClearTable);
......
......@@ -199,7 +199,6 @@ int
NFDuringBackupM_codes[] = {
10003,
10004,
10005,
10007,
10008,
10009,
......@@ -349,6 +348,7 @@ NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz,
int
FailS_codes[] = {
10025,
10027,
10033
};
......