Commit 0fa18a46 authored by sergefp@mysql.com

Merge spetrunia@bk-internal.mysql.com:/home/bk/mysql-5.1-new

into mysql.com:/home/psergey/mysql-5.1-wl2985-postfixes
parents a906e3c6 7a9265cf
@@ -253,12 +253,17 @@ sub collect_one_test_case($$$$$$$) {
   }
   if ( ( $::opt_with_ndbcluster or $::glob_use_running_ndbcluster ) and
-       defined mtr_match_substring($tname,"ndb") )
+       ( $::opt_with_ndbcluster_all or defined mtr_match_substring($tname,"ndb") ))
   {
     $tinfo->{'ndb_test'}= 1;
   }
   else
   {
+    if ( $::opt_with_ndbcluster_only )
+    {
+      $tinfo->{'skip'}= 1;
+      return;
+    }
     $tinfo->{'ndb_test'}= 0;
   }
...
@@ -304,11 +304,13 @@ our $opt_warnings;
 our $opt_udiff;
-our $opt_skip_ndbcluster;
+our $opt_skip_ndbcluster= 0;
 our $opt_with_ndbcluster;
-our $opt_skip_ndbcluster_slave;
+our $opt_skip_ndbcluster_slave= 0;
 our $opt_with_ndbcluster_slave;
-our $opt_ndb_extra_test;
+our $opt_with_ndbcluster_all= 0;
+our $opt_with_ndbcluster_only= 0;
+our $opt_ndb_extra_test= 0;
 our $exe_ndb_mgm;
 our $path_ndb_tools_dir;
@@ -548,6 +550,8 @@ sub command_line_setup () {
              'with-ndbcluster-slave'   => \$opt_with_ndbcluster_slave,
              'skip-ndbcluster-slave|skip-ndb-slave'
                                        => \$opt_skip_ndbcluster_slave,
+             'with-ndbcluster-all'     => \$opt_with_ndbcluster_all,
+             'with-ndbcluster-only'    => \$opt_with_ndbcluster_only,
              'ndb-extra-test'          => \$opt_ndb_extra_test,
              'do-test=s'               => \$opt_do_test,
              'suite=s'                 => \$opt_suite,
@@ -1938,6 +1942,10 @@ sub run_testcase ($) {
   mtr_report_test_name($tinfo);
   mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n");
+  if ( $master->[1]->{'pid'} )
+  {
+    mtr_tofile($master->[1]->{'path_myerr'},"CURRENT_TEST: $tname\n");
+  }
   # FIXME test cases that depend on each other, prevent this from
   # being at this location.
@@ -1986,6 +1994,7 @@ sub run_testcase ($) {
     }
     if ( $using_ndbcluster_master and ! $master->[1]->{'pid'} )
     {
+      mtr_tofile($master->[1]->{'path_myerr'},"CURRENT_TEST: $tname\n");
       $master->[1]->{'pid'}=
         mysqld_start('master',1,$tinfo->{'master_opt'},[],
                      $using_ndbcluster_master);
@@ -3038,7 +3047,9 @@ Options to control what engine/variation to run
Options to control what test suites or cases to run

  force                 Continue to run the suite after failure
-  with-ndbcluster       Use cluster, and enable test cases that requres it
+  with-ndbcluster       Use cluster, and enable test cases that requires it
+  with-ndbcluster-all   Use cluster in all tests
+  with-ndbcluster-only  Run only tests that include "ndb" in the filename
  skip-ndb[cluster]     Skip the ndb test cases, don't start cluster
  do-test=PREFIX        Run test cases which name are prefixed with PREFIX
  start-from=PREFIX     Run test cases starting from test prefixed with PREFIX
...
@@ -630,7 +630,7 @@ export MASTER_MYHOST MASTER_MYPORT SLAVE_MYHOST SLAVE_MYPORT MYSQL_TCP_PORT MAST
 NDBCLUSTER_OPTS="--port=$NDBCLUSTER_PORT --data-dir=$MYSQL_TEST_DIR/var --ndb_mgm-extra-opts=$NDB_MGM_EXTRA_OPTS --ndb_mgmd-extra-opts=$NDB_MGMD_EXTRA_OPTS --ndbd-extra-opts=$NDBD_EXTRA_OPTS"
 NDBCLUSTER_OPTS_SLAVE="--port=$NDBCLUSTER_PORT_SLAVE --data-dir=$MYSQL_TEST_DIR/var"
 if [ -n "$USE_NDBCLUSTER_SLAVE" ] ; then
-  USE_NDBCLUSTER_SLAVE="$USE_NDBCLUSTER_SLAVE --ndb-connectstring=localhost:$NDBCLUSTER_PORT_SLAVE"
+  USE_NDBCLUSTER_SLAVE="$USE_NDBCLUSTER_SLAVE --ndb-connectstring=localhost:$NDBCLUSTER_PORT_SLAVE --ndb-extra-logging"
 fi
 NDB_BACKUP_DIR=$MYSQL_TEST_DIR/var/ndbcluster-$NDBCLUSTER_PORT
 NDB_TOOLS_OUTPUT=$MYSQL_TEST_DIR/var/log/ndb_tools.log
@@ -1016,6 +1016,10 @@ disable_test() {
 report_current_test () {
   tname=$1
   echo "CURRENT_TEST: $tname" >> $MASTER_MYERR
+  eval "master1_running=\$MASTER1_RUNNING"
+  if [ x$master1_running = x1 ] ; then
+    echo "CURRENT_TEST: $tname" >> $MASTER_MYERR"1"
+  fi
   if [ -n "$PURIFY_LOGS" ] ; then
     for log in $PURIFY_LOGS
     do
@@ -1297,7 +1301,7 @@ start_ndbcluster()
      NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER"
      echo "Using ndbcluster at $NDB_CONNECTSTRING"
    fi
-    USE_NDBCLUSTER_OPT="$USE_NDBCLUSTER --ndb-connectstring=\"$NDB_CONNECTSTRING\""
+    USE_NDBCLUSTER_OPT="$USE_NDBCLUSTER --ndb-connectstring=\"$NDB_CONNECTSTRING\" --ndb-extra-logging"
    export NDB_CONNECTSTRING
  fi
 }
@@ -1876,6 +1880,7 @@ run_testcase ()
      start_ndbcluster
      start_master
      if [ x$USING_NDBCLUSTER = x1 -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
+        echo "CURRENT_TEST: $tname" >> $MASTER_MYERR"1"
        start_master 1
      fi
      TZ=$MY_TZ; export TZ
@@ -1903,6 +1908,7 @@ run_testcase ()
      start_ndbcluster
      start_master
      if [ x$USING_NDBCLUSTER = x1 -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
+        echo "CURRENT_TEST: $tname" >> $MASTER_MYERR"1"
        start_master 1
      fi
    else
...
This diff is collapsed.
@@ -348,7 +348,7 @@ select count(*)
        select * from t9_c) a;
 drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c;
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-2 2>&1 | grep Translate || true
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --core=0 -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-2 2>&1 | grep Translate || true
 #
 # Cleanup
...
This diff is collapsed.
@@ -4770,13 +4770,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
                         "Creating event for logging table failed. "
                         "See error log for details.");
     }
-    if (is_old_table_tmpfile)
-      ndbcluster_log_schema_op(current_thd, share,
-                               current_thd->query, current_thd->query_length,
-                               m_dbname, new_tabname,
-                               0, 0,
-                               SOT_ALTER_TABLE);
-    else
+    if (!is_old_table_tmpfile)
       ndbcluster_log_schema_op(current_thd, share,
                                current_thd->query, current_thd->query_length,
                                m_dbname, new_tabname,
...
@@ -441,6 +441,7 @@ ndbcluster_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
     break;
   case LOGCOM_ALTER_TABLE:
     type= SOT_ALTER_TABLE;
+    log= 1;
     break;
   case LOGCOM_RENAME_TABLE:
     type= SOT_RENAME_TABLE;
@@ -461,8 +462,10 @@ ndbcluster_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
     break;
   }
   if (log)
+  {
     ndbcluster_log_schema_op(thd, 0, query, query_length,
                              db, table_name, 0, 0, type);
+  }
   DBUG_VOID_RETURN;
 }
@@ -891,6 +894,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
   }
   char tmp_buf2[FN_REFLEN];
+  int get_a_share= 0;
   switch (type)
   {
   case SOT_DROP_TABLE:
@@ -900,13 +904,15 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
     /* redo the drop table query as is may contain several tables */
     query= tmp_buf2;
     query_length= (uint) (strxmov(tmp_buf2, "drop table `",
                                   table_name, "`", NullS) - tmp_buf2);
-    break;
+    // fall through
   case SOT_CREATE_TABLE:
-    break;
+    // fall through
   case SOT_RENAME_TABLE:
-    break;
+    // fall through
   case SOT_ALTER_TABLE:
+    if (!share)
+      get_a_share= 1;
     break;
   case SOT_DROP_DB:
     break;
@@ -922,6 +928,14 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
     abort(); /* should not happen, programming error */
   }
+  if (get_a_share)
+  {
+    char key[FN_REFLEN];
+    (void)strxnmov(key, FN_REFLEN, share_prefix, db,
+                   "/", table_name, NullS);
+    share= get_share(key, 0, false, false);
+  }
   const NdbError *ndb_error= 0;
   uint32 node_id= g_ndb_cluster_connection->node_id();
   Uint64 epoch= 0;
@@ -956,7 +970,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
   }
   Ndb *ndb= thd_ndb->ndb;
-  char old_db[128];
+  char old_db[FN_REFLEN];
   strcpy(old_db, ndb->getDatabaseName());
   char tmp_buf[SCHEMA_QUERY_SIZE];
@@ -974,9 +988,8 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
         strcmp(NDB_SCHEMA_TABLE, table_name))
     {
       ndb_error= &dict->getNdbError();
-      goto end;
     }
-    DBUG_RETURN(0);
+    goto end;
   }
   {
@@ -1119,6 +1132,10 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share,
     }
     (void) pthread_mutex_unlock(&share->mutex);
   }
+  if (get_a_share)
+    free_share(&share);
   DBUG_RETURN(0);
 }
@@ -1328,7 +1345,10 @@ static int
 ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
                                       NdbEventOperation *pOp,
                                       List<Cluster_replication_schema>
-                                      *schema_list, MEM_ROOT *mem_root)
+                                      *post_epoch_log_list,
+                                      List<Cluster_replication_schema>
+                                      *post_epoch_unlock_list,
+                                      MEM_ROOT *mem_root)
 {
   DBUG_ENTER("ndb_binlog_thread_handle_schema_event");
   NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
@@ -1357,7 +1377,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
     {
     case SOT_DROP_TABLE:
       /* binlog dropping table after any table operations */
-      schema_list->push_back(schema, mem_root);
+      post_epoch_log_list->push_back(schema, mem_root);
      log_query= 0;
      break;
    case SOT_RENAME_TABLE:
@@ -1389,7 +1409,7 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
                          TRUE,    /* print error */
                          TRUE);   /* don't binlog the query */
       /* binlog dropping database after any table operations */
-      schema_list->push_back(schema, mem_root);
+      post_epoch_log_list->push_back(schema, mem_root);
      log_query= 0;
      break;
    case SOT_CREATE_DB:
@@ -1431,7 +1451,18 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
     {
       DBUG_DUMP("slock", (char*)schema->slock, schema->slock_length);
       if (bitmap_is_set(&slock, node_id))
-        ndbcluster_update_slock(thd, schema->db, schema->name);
+      {
+        /*
+          If it is an SOT_ALTER_TABLE we need to acknowledge the
+          schema operation _after_ all the events have been
+          processed, so that all schema events coming through
+          the event operation have been processed
+        */
+        if ((enum SCHEMA_OP_TYPE)schema->type == SOT_ALTER_TABLE)
+          post_epoch_unlock_list->push_back(schema, mem_root);
+        else
+          ndbcluster_update_slock(thd, schema->db, schema->name);
+      }
     }
     if (log_query)
@@ -2738,7 +2769,8 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
       MEM_ROOT *old_root= *root_ptr;
       MEM_ROOT mem_root;
       init_sql_alloc(&mem_root, 4096, 0);
-      List<Cluster_replication_schema> schema_list;
+      List<Cluster_replication_schema> post_epoch_log_list;
+      List<Cluster_replication_schema> post_epoch_unlock_list;
       *root_ptr= &mem_root;
       if (unlikely(schema_res > 0))
@@ -2751,7 +2783,9 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
         {
           if (!pOp->hasError())
             ndb_binlog_thread_handle_schema_event(thd, schema_ndb, pOp,
-                                                  &schema_list, &mem_root);
+                                                  &post_epoch_log_list,
+                                                  &post_epoch_unlock_list,
+                                                  &mem_root);
           else
             sql_print_error("NDB: error %lu (%s) on handling "
                             "binlog schema event",
@@ -2878,9 +2912,17 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
        }
      }
+      /*
+        process any operations that should be done after
+        the epoch is complete
+      */
      {
        Cluster_replication_schema *schema;
-        while ((schema= schema_list.pop()))
+        while ((schema= post_epoch_unlock_list.pop()))
+        {
+          ndbcluster_update_slock(thd, schema->db, schema->name);
+        }
+        while ((schema= post_epoch_log_list.pop()))
        {
          char *thd_db_save= thd->db;
          thd->db= schema->db;
...
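The hunks above split the old single schema_list into a post_epoch_log_list and a post_epoch_unlock_list, and drain both only once the whole epoch has been consumed. Below is a minimal standalone sketch of that two-phase pattern; SchemaEvent and std::list are simplified stand-ins for the server's Cluster_replication_schema and List types, and this is not the actual binlog thread code.

```cpp
// Two-phase handling sketch: queue deferred work during the epoch, drain it afterwards.
#include <cstdio>
#include <list>
#include <string>

struct SchemaEvent            // stand-in for Cluster_replication_schema
{
  std::string db, name, query;
  bool is_alter;
  bool is_drop;
};

int main()
{
  std::list<SchemaEvent> post_epoch_log_list;     // e.g. DROP TABLE / DROP DATABASE
  std::list<SchemaEvent> post_epoch_unlock_list;  // e.g. ALTER TABLE slock acks

  SchemaEvent events[]= {
    { "db1", "t1", "ALTER TABLE t1 ADD c INT", true,  false },
    { "db1", "t2", "DROP TABLE t2",            false, true  }
  };

  // Phase 1: handle each schema event of the epoch, deferring what must wait.
  for (const SchemaEvent &ev : events)
  {
    if (ev.is_alter)
      post_epoch_unlock_list.push_back(ev);  // acknowledge only after the epoch
    else if (ev.is_drop)
      post_epoch_log_list.push_back(ev);     // binlog drops after other operations
    else
      std::printf("binlog now: %s\n", ev.query.c_str());
  }

  // Phase 2: the epoch is complete, drain the deferred work in order.
  while (!post_epoch_unlock_list.empty())
  {
    SchemaEvent ev= post_epoch_unlock_list.front();
    post_epoch_unlock_list.pop_front();
    std::printf("update slock for %s.%s\n", ev.db.c_str(), ev.name.c_str());
  }
  while (!post_epoch_log_list.empty())
  {
    SchemaEvent ev= post_epoch_log_list.front();
    post_epoch_log_list.pop_front();
    std::printf("binlog deferred: %s\n", ev.query.c_str());
  }
  return 0;
}
```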
@@ -29,18 +29,24 @@ extern ulong ndb_extra_logging;
 #define INJECTOR_EVENT_LEN 200
+/*
+  The numbers below must not change as they
+  are passed between mysql servers, and if changed
+  would break compatibility. Add new numbers to
+  the end.
+*/
 enum SCHEMA_OP_TYPE
 {
-  SOT_DROP_TABLE,
-  SOT_CREATE_TABLE,
-  SOT_RENAME_TABLE,
-  SOT_ALTER_TABLE,
-  SOT_DROP_DB,
-  SOT_CREATE_DB,
-  SOT_ALTER_DB,
-  SOT_CLEAR_SLOCK,
-  SOT_TABLESPACE,
-  SOT_LOGFILE_GROUP
+  SOT_DROP_TABLE= 0,
+  SOT_CREATE_TABLE= 1,
+  SOT_RENAME_TABLE= 2,
+  SOT_ALTER_TABLE= 3,
+  SOT_DROP_DB= 4,
+  SOT_CREATE_DB= 5,
+  SOT_ALTER_DB= 6,
+  SOT_CLEAR_SLOCK= 7,
+  SOT_TABLESPACE= 8,
+  SOT_LOGFILE_GROUP= 9
 };
 const uint max_ndb_nodes= 64; /* multiple of 32 */
...
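The new comment pins explicit values on SCHEMA_OP_TYPE because only the integer value travels between servers. A small hedged sketch of why renumbering would break mixed-version clusters while appending at the end does not; the encode/decode helpers are hypothetical and not part of the server.

```cpp
#include <cassert>
#include <cstdint>

// Values mirror the diff; they must stay stable because only the integer
// goes on the wire, so new operations are appended at the end.
enum SCHEMA_OP_TYPE
{
  SOT_DROP_TABLE= 0,
  SOT_CREATE_TABLE= 1,
  SOT_RENAME_TABLE= 2,
  SOT_ALTER_TABLE= 3,
  SOT_DROP_DB= 4,
  SOT_CREATE_DB= 5,
  SOT_ALTER_DB= 6,
  SOT_CLEAR_SLOCK= 7,
  SOT_TABLESPACE= 8,
  SOT_LOGFILE_GROUP= 9
};

// Hypothetical helpers: what is written and read is just the numeric value.
static uint32_t encode_schema_op(SCHEMA_OP_TYPE t) { return (uint32_t) t; }
static SCHEMA_OP_TYPE decode_schema_op(uint32_t v) { return (SCHEMA_OP_TYPE) v; }

int main()
{
  // A server that predates SOT_LOGFILE_GROUP still decodes the older values
  // identically, which is the point of never changing existing numbers.
  assert(decode_schema_op(encode_schema_op(SOT_ALTER_TABLE)) == SOT_ALTER_TABLE);
  return 0;
}
```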
@@ -2529,12 +2529,11 @@ struct binlog_log_query_st
   const char *table_name;
 };

-static my_bool binlog_log_query_handlerton(THD *thd,
-                                           st_plugin_int *plugin,
-                                           void *args)
+static my_bool binlog_log_query_handlerton2(THD *thd,
+                                            const handlerton *hton,
+                                            void *args)
 {
   struct binlog_log_query_st *b= (struct binlog_log_query_st*)args;
-  handlerton *hton= (handlerton *) plugin->plugin->info;
   if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query)
     hton->binlog_log_query(thd,
                            b->binlog_command,
@@ -2545,7 +2544,15 @@ static my_bool binlog_log_query_handlerton(THD *thd,
   return FALSE;
 }

-void ha_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
+static my_bool binlog_log_query_handlerton(THD *thd,
+                                           st_plugin_int *plugin,
+                                           void *args)
+{
+  return binlog_log_query_handlerton2(thd, (const handlerton *) plugin->plugin->info, args);
+}
+
+void ha_binlog_log_query(THD *thd, const handlerton *hton,
+                         enum_binlog_command binlog_command,
                          const char *query, uint query_length,
                          const char *db, const char *table_name)
 {
@@ -2555,8 +2562,11 @@ void ha_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
   b.query_length= query_length;
   b.db= db;
   b.table_name= table_name;
-  plugin_foreach(thd, binlog_log_query_handlerton,
-                 MYSQL_STORAGE_ENGINE_PLUGIN, &b);
+  if (hton == 0)
+    plugin_foreach(thd, binlog_log_query_handlerton,
+                   MYSQL_STORAGE_ENGINE_PLUGIN, &b);
+  else
+    binlog_log_query_handlerton2(thd, hton, &b);
 }
 #endif
...
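The handler.cc hunks above let ha_binlog_log_query() either target one known handlerton (the new const handlerton * argument) or fall back to iterating every storage-engine plugin when the caller passes 0. A rough standalone sketch of that dispatch shape; the struct and function names are simplified stand-ins rather than the real st_plugin_int/handlerton types.

```cpp
#include <cstdio>
#include <vector>

struct handlerton_sketch                        // stand-in for handlerton
{
  const char *name;
  void (*binlog_log_query)(const char *query);  // NULL if the engine does not log
};

static std::vector<handlerton_sketch*> all_engines;  // stand-in for plugin_foreach

static void log_query_one(const handlerton_sketch *hton, const char *query)
{
  if (hton->binlog_log_query)
    hton->binlog_log_query(query);
}

static void ha_binlog_log_query_sketch(const handlerton_sketch *hton,
                                       const char *query)
{
  if (hton == NULL)                        // caller does not know the engine:
    for (handlerton_sketch *h : all_engines)
      log_query_one(h, query);             //   fan out to every engine
  else
    log_query_one(hton, query);            // caller knows it: targeted call
}

static void ndb_log(const char *q) { std::printf("ndb logs: %s\n", q); }

int main()
{
  handlerton_sketch ndb= { "ndbcluster", ndb_log };
  all_engines.push_back(&ndb);
  ha_binlog_log_query_sketch(NULL, "CREATE DATABASE db1");       // CREATE/ALTER DATABASE path
  ha_binlog_log_query_sketch(&ndb, "ALTER TABLE t1 ADD c INT");  // ALTER TABLE path
  return 0;
}
```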
@@ -2020,7 +2020,8 @@ int ha_repl_report_replication_stop(THD *thd);
 int ha_reset_logs(THD *thd);
 int ha_binlog_index_purge_file(THD *thd, const char *file);
 void ha_reset_slave(THD *thd);
-void ha_binlog_log_query(THD *thd, enum_binlog_command binlog_command,
+void ha_binlog_log_query(THD *thd, const handlerton *db_type,
+                         enum_binlog_command binlog_command,
                          const char *query, uint query_length,
                          const char *db, const char *table_name);
 void ha_binlog_wait(THD *thd);
...
@@ -5257,6 +5257,7 @@ void Item_insert_value::print(String *str)
     setup_field()
       thd - current thread context
       table - table of trigger (and where we looking for fields)
+      table_grant_info - GRANT_INFO of the subject table
   NOTE
     This function does almost the same as fix_fields() for Item_field
@@ -5270,7 +5271,8 @@ void Item_insert_value::print(String *str)
     table of trigger which uses this item.
 */
-void Item_trigger_field::setup_field(THD *thd, TABLE *table)
+void Item_trigger_field::setup_field(THD *thd, TABLE *table,
+                                     GRANT_INFO *table_grant_info)
 {
   bool save_set_query_id= thd->set_query_id;
@@ -5284,6 +5286,7 @@ void Item_trigger_field::setup_field(THD *thd, TABLE *table)
                             0, &field_idx);
   thd->set_query_id= save_set_query_id;
   triggers= table->triggers;
+  table_grants= table_grant_info;
 }
@@ -5302,22 +5305,42 @@ bool Item_trigger_field::fix_fields(THD *thd, Item **items)
     Since trigger is object tightly associated with TABLE object most
     of its set up can be performed during trigger loading i.e. trigger
     parsing! So we have little to do in fix_fields. :)
-    FIXME may be we still should bother about permissions here.
   */
   DBUG_ASSERT(fixed == 0);
+  /* Set field. */
   if (field_idx != (uint)-1)
   {
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+    /*
+      Check access privileges for the subject table. We check privileges only
+      in runtime.
+    */
+    if (table_grants)
+    {
+      table_grants->want_privilege=
+        access_type == AT_READ ? SELECT_ACL : UPDATE_ACL;
+      if (check_grant_column(thd, table_grants, triggers->table->s->db.str,
+                             triggers->table->s->table_name.str, field_name,
+                             strlen(field_name), thd->security_ctx))
+        return TRUE;
+    }
+#endif // NO_EMBEDDED_ACCESS_CHECKS
     field= (row_version == OLD_ROW) ? triggers->old_field[field_idx] :
                                       triggers->new_field[field_idx];
     set_field(field);
     fixed= 1;
-    return 0;
+    return FALSE;
   }
   my_error(ER_BAD_FIELD_ERROR, MYF(0), field_name,
            (row_version == NEW_ROW) ? "NEW" : "OLD");
-  return 1;
+  return TRUE;
 }
...
@@ -2172,6 +2172,8 @@ class Item_trigger_field : public Item_field
   /* Is this item represents row from NEW or OLD row ? */
   enum row_version_type {OLD_ROW, NEW_ROW};
   row_version_type row_version;
+  /* Is this item used for reading or updating the value? */
+  enum access_types { AT_READ = 0x1, AT_UPDATE = 0x2 };
   /* Next in list of all Item_trigger_field's in trigger */
   Item_trigger_field *next_trg_field;
   /* Index of the field in the TABLE::field array */
@@ -2181,18 +2183,24 @@ class Item_trigger_field : public Item_field
   Item_trigger_field(Name_resolution_context *context_arg,
                      row_version_type row_ver_arg,
-                     const char *field_name_arg)
+                     const char *field_name_arg,
+                     access_types access_type_arg)
     :Item_field(context_arg,
                 (const char *)NULL, (const char *)NULL, field_name_arg),
-     row_version(row_ver_arg), field_idx((uint)-1)
+     row_version(row_ver_arg), field_idx((uint)-1),
+     access_type(access_type_arg), table_grants(NULL)
   {}
-  void setup_field(THD *thd, TABLE *table);
+  void setup_field(THD *thd, TABLE *table, GRANT_INFO *table_grant_info);
   enum Type type() const { return TRIGGER_FIELD_ITEM; }
   bool eq(const Item *item, bool binary_cmp) const;
   bool fix_fields(THD *, Item **);
   void print(String *str);
   table_map used_tables() const { return (table_map)0L; }
   void cleanup();
+
+private:
+  access_types access_type;
+  GRANT_INFO *table_grants;
 };
...
@@ -501,7 +501,7 @@ bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
     query_length= thd->query_length;
   }
-  ha_binlog_log_query(thd, LOGCOM_CREATE_DB,
+  ha_binlog_log_query(thd, 0, LOGCOM_CREATE_DB,
                       query, query_length,
                       db, "");
@@ -579,7 +579,7 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info)
     thd->variables.collation_database= thd->db_charset;
   }
-  ha_binlog_log_query(thd, LOGCOM_ALTER_DB,
+  ha_binlog_log_query(thd, 0, LOGCOM_ALTER_DB,
                       thd->query, thd->query_length,
                       db, "");
...
@@ -4974,6 +4974,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
   }
   thd->proc_info="end";
+
+  ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE,
+                      thd->query, thd->query_length,
+                      db, table_name);
   DBUG_ASSERT(!(mysql_bin_log.is_open() && binlog_row_based &&
                 (create_info->options & HA_LEX_CREATE_TMP_TABLE)));
   write_bin_log(thd, TRUE, thd->query, thd->query_length);
...
@@ -390,7 +390,12 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
   for (trg_field= (Item_trigger_field *)(lex->trg_table_fields.first);
        trg_field; trg_field= trg_field->next_trg_field)
   {
-    trg_field->setup_field(thd, table);
+    /*
+      NOTE: now we do not check privileges at CREATE TRIGGER time. This will
+      be changed in the future.
+    */
+    trg_field->setup_field(thd, table, NULL);
+
     if (!trg_field->fixed &&
         trg_field->fix_fields(thd, (Item **)0))
       return 1;
@@ -826,8 +831,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
     char *trg_name_buff;
     List_iterator_fast<ulonglong> itm(triggers->definition_modes_list);
-    List_iterator_fast<LEX_STRING> it_definer(triggers->
-                                              definers_list);
+    List_iterator_fast<LEX_STRING> it_definer(triggers->definers_list);
     LEX *old_lex= thd->lex, lex;
     sp_rcontext *save_spcont= thd->spcont;
     ulong save_sql_mode= thd->variables.sql_mode;
@@ -842,6 +846,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
     {
       trg_sql_mode= itm++;
       LEX_STRING *trg_definer= it_definer++;
+
       thd->variables.sql_mode= (ulong)*trg_sql_mode;
       lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length);
@@ -915,11 +920,11 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
                  (Item_trigger_field *)(lex.trg_table_fields.first);
            trg_field;
            trg_field= trg_field->next_trg_field)
-        trg_field->setup_field(thd, table);
-
-      triggers->m_spec_var_used[lex.trg_chistics.event]
-                               [lex.trg_chistics.action_time]=
-        lex.trg_table_fields.first ? TRUE : FALSE;
+      {
+        trg_field->setup_field(thd, table,
+          &triggers->subject_table_grants[lex.trg_chistics.event]
+                                         [lex.trg_chistics.action_time]);
+      }
       lex_end(&lex);
     }
@@ -1159,38 +1164,30 @@ bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event,
   if (sp_change_security_context(thd, sp_trigger, &save_ctx))
     return TRUE;
-  {
-    TABLE_LIST table_list, **save_query_tables_own_last;
-    ulong wanted_access = TRIGGER_ACL;
-
-    bzero((char *) &table_list, sizeof (table_list));
-    table_list.db= (char *) table->s->db.str;
-    table_list.db_length= table->s->db.length;
-    table_list.table_name= table->s->table_name.str;
-    table_list.table_name_length= table->s->table_name.length;
-    table_list.alias= (char *) table->alias;
-    table_list.table= table;
-
-    save_query_tables_own_last= thd->lex->query_tables_own_last;
-    thd->lex->query_tables_own_last= 0;
-
-    /*
-      If the trigger uses special variables (NEW/OLD), check that we have
-      SELECT and UPDATE privileges on the subject table.
-    */
-    if (is_special_var_used(event, time_type))
-      wanted_access|= SELECT_ACL | UPDATE_ACL;
-
-    err_status= check_table_access(thd, wanted_access, &table_list, 0);
-    thd->lex->query_tables_own_last= save_query_tables_own_last;
-
-    if (err_status)
-    {
-      sp_restore_security_context(thd, save_ctx);
-      return TRUE;
-    }
+  /*
+    Fetch information about table-level privileges to GRANT_INFO structure for
+    subject table. Check of privileges that will use it and information about
+    column-level privileges will happen in Item_trigger_field::fix_fields().
+  */
+  fill_effective_table_privileges(thd,
+                                  &subject_table_grants[event][time_type],
+                                  table->s->db.str, table->s->table_name.str);
+
+  /* Check that the definer has TRIGGER privilege on the subject table. */
+  if (!(subject_table_grants[event][time_type].privilege & TRIGGER_ACL))
+  {
+    char priv_desc[128];
+    get_privilege_desc(priv_desc, sizeof(priv_desc), TRIGGER_ACL);
+
+    my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), priv_desc,
+             thd->security_ctx->priv_user, thd->security_ctx->host_or_ip,
+             table->s->table_name);
+
+    sp_restore_security_context(thd, save_ctx);
+    return TRUE;
   }
 #endif // NO_EMBEDDED_ACCESS_CHECKS
   thd->reset_sub_statement_state(&statement_state, SUB_STMT_TRIGGER);
...
@@ -56,10 +56,9 @@ class Table_triggers_list: public Sql_alloc
   LEX_STRING sroutines_key;

   /*
-    is_special_var_used specifies whether trigger body contains special
-    variables (NEW/OLD).
+    Grant information for each trigger (pair: subject table, trigger definer).
   */
-  bool m_spec_var_used[TRG_EVENT_MAX][TRG_ACTION_MAX];
+  GRANT_INFO subject_table_grants[TRG_EVENT_MAX][TRG_ACTION_MAX];

 public:
   /*
@@ -78,6 +77,7 @@ class Table_triggers_list: public Sql_alloc
     record1_field(0), table(table_arg)
   {
     bzero((char *)bodies, sizeof(bodies));
+    bzero((char *)&subject_table_grants, sizeof(subject_table_grants));
   }
   ~Table_triggers_list();
@@ -109,11 +109,6 @@ class Table_triggers_list: public Sql_alloc
     return test(bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE]);
   }
-  inline bool is_special_var_used(int event, int action_time) const
-  {
-    return m_spec_var_used[event][action_time];
-  }
-
   void set_table(TABLE *new_table);
   friend class Item_trigger_field;
...
@@ -9043,7 +9043,8 @@ simple_ident_q:
                                                   new_row ?
                                                   Item_trigger_field::NEW_ROW:
                                                   Item_trigger_field::OLD_ROW,
-                                                  $3.str)))
+                                                  $3.str,
+                                                  Item_trigger_field::AT_READ)))
               YYABORT;
            /*
@@ -9727,7 +9728,9 @@ sys_option_value:
           if (!(trg_fld= new Item_trigger_field(Lex->current_context(),
                                                 Item_trigger_field::NEW_ROW,
-                                                $2.base_name.str)) ||
+                                                $2.base_name.str,
+                                                Item_trigger_field::AT_UPDATE)
+                ) ||
               !(sp_fld= new sp_instr_set_trigger_field(lex->sphead->
                                                        instructions(),
                                                        lex->spcont,
...
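In the parser hunks above, every NEW.x/OLD.x reference is now created with an access type: AT_READ when the value is read in the trigger body, AT_UPDATE for `SET NEW.x := ...`. Item_trigger_field::fix_fields() then turns that tag into the column privilege demanded from the trigger definer. A toy standalone illustration of just that mapping; SELECT_ACL/UPDATE_ACL below are simplified stand-ins for the server's ACL bit macros.

```cpp
#include <cstdio>

enum access_types { AT_READ= 0x1, AT_UPDATE= 0x2 };       // as in Item_trigger_field
enum privilege_bits { SELECT_ACL= 0x1, UPDATE_ACL= 0x2 };  // simplified stand-ins

// Mirrors the want_privilege choice made in fix_fields():
// reading a trigger field needs SELECT, assigning to it needs UPDATE.
static privilege_bits wanted_privilege(access_types access_type)
{
  return access_type == AT_READ ? SELECT_ACL : UPDATE_ACL;
}

int main()
{
  // "IF NEW.a > 0 THEN ..." only reads the column; "SET NEW.b := 1" updates it.
  std::printf("NEW.a (read)  -> %s on the column\n",
              wanted_privilege(AT_READ) == SELECT_ACL ? "SELECT" : "UPDATE");
  std::printf("NEW.b (write) -> %s on the column\n",
              wanted_privilege(AT_UPDATE) == UPDATE_ACL ? "UPDATE" : "SELECT");
  return 0;
}
```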
@@ -895,6 +895,23 @@ Dbtup::disk_page_tup_scan_callback(Signal* signal, Uint32 scanPtrI, Uint32 page_
 void
 Dbtup::scanClose(Signal* signal, ScanOpPtr scanPtr)
 {
+  ScanOp& scan = *scanPtr.p;
+  ndbrequire(! (scan.m_bits & ScanOp::SCAN_LOCK_WAIT) && scan.m_accLockOp == RNIL);
+  // unlock all not unlocked by LQH
+  LocalDLFifoList<ScanLock> list(c_scanLockPool, scan.m_accLockOps);
+  ScanLockPtr lockPtr;
+  while (list.first(lockPtr)) {
+    jam();
+    AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
+    lockReq->returnCode = RNIL;
+    lockReq->requestInfo = AccLockReq::Abort;
+    lockReq->accOpPtr = lockPtr.p->m_accLockOp;
+    EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
+    jamEntry();
+    ndbrequire(lockReq->returnCode == AccLockReq::Success);
+    list.release(lockPtr);
+  }
+  // send conf
   NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
   conf->scanPtr = scanPtr.p->m_userPtr;
   conf->accOperationPtr = RNIL;
...
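The DbtupScan hunk makes scanClose() abort any row locks the scan still owns before confirming the close, instead of leaking them. A loose standalone sketch of that cleanup loop; the lock-manager stub below is a hypothetical stand-in for the ACC_LOCKREQ signal interface, not NDB kernel code.

```cpp
#include <cassert>
#include <cstdio>
#include <list>

struct LockManagerStub          // hypothetical stand-in for DBACC
{
  bool abort_lock(unsigned lock_op)
  {
    std::printf("abort lock operation %u\n", lock_op);
    return true;                // the real code requires success (ndbrequire)
  }
};

// Release every lock that LQH did not already unlock, then confirm the close.
static void scan_close_sketch(std::list<unsigned> &acc_lock_ops,
                              LockManagerStub &acc)
{
  while (!acc_lock_ops.empty())
  {
    unsigned lock_op= acc_lock_ops.front();
    acc_lock_ops.pop_front();
    bool ok= acc.abort_lock(lock_op);
    assert(ok);                 // mirrors ndbrequire(returnCode == Success)
  }
  std::printf("scan closed, confirmation sent\n");
}

int main()
{
  LockManagerStub acc;
  std::list<unsigned> held_locks= { 11, 12, 13 };
  scan_close_sketch(held_locks, acc);
  return 0;
}
```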