Commit 4af95eb7 authored by jonas@perch.ndb.mysql.com

Merge perch.ndb.mysql.com:/home/jonas/src/mysql-5.1-new

into  perch.ndb.mysql.com:/home/jonas/src/mysql-5.1-new-ndb
parents 50b8eb85 b6c58769
 CREATE DATABASE IF NOT EXISTS events_test;
 CREATE DATABASE events_conn1_test2;
-CREATE TABLE events_test.fill_it(test_name varchar(20), occur datetime);
+CREATE TABLE events_test.fill_it1(test_name varchar(20), occur datetime);
+CREATE TABLE events_test.fill_it2(test_name varchar(20), occur datetime);
+CREATE TABLE events_test.fill_it3(test_name varchar(20), occur datetime);
 CREATE USER event_user2@localhost;
 CREATE DATABASE events_conn2_db;
 GRANT ALL ON *.* TO event_user2@localhost;
@@ -57,5 +59,7 @@ SET GLOBAL event_scheduler=2;
 DROP DATABASE events_conn1_test4;
 SET GLOBAL event_scheduler=1;
 USE events_test;
-DROP TABLE fill_it;
+DROP TABLE fill_it1;
+DROP TABLE fill_it2;
+DROP TABLE fill_it3;
 DROP DATABASE events_test;
-drop table if exists t1,t2;
-drop table if exists t1,t2;
+drop table if exists t1,t2,t3;
+drop table if exists t1,t2,t3;
+CREATE TABLE t3 (dummy INT PRIMARY KEY) ENGINE = NDB;
+DROP TABLE t3;
 reset master;
 reset master;
 CREATE TABLE t2 (a INT PRIMARY KEY, b int) ENGINE = NDB;
......
@@ -3,7 +3,11 @@ CREATE DATABASE IF NOT EXISTS events_test;
 # DROP DATABASE test start (bug #16406)
 #
 CREATE DATABASE events_conn1_test2;
-CREATE TABLE events_test.fill_it(test_name varchar(20), occur datetime);
+# BUG#20676: MySQL in debug mode has a limit of 100 waiters
+# (in mysys/thr_lock.c), so use three different tables to insert into.
+CREATE TABLE events_test.fill_it1(test_name varchar(20), occur datetime);
+CREATE TABLE events_test.fill_it2(test_name varchar(20), occur datetime);
+CREATE TABLE events_test.fill_it3(test_name varchar(20), occur datetime);
 CREATE USER event_user2@localhost;
 CREATE DATABASE events_conn2_db;
 GRANT ALL ON *.* TO event_user2@localhost;
@@ -16,7 +20,7 @@ connect (conn2,localhost,event_user2,,events_conn2_db);
 let $1= 50;
 while ($1)
 {
-  eval CREATE EVENT conn2_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it VALUES("conn2_ev$1", NOW());
+  eval CREATE EVENT conn2_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it1 VALUES("conn2_ev$1", NOW());
   dec $1;
 }
 --enable_query_log
@@ -26,7 +30,7 @@ connect (conn3,localhost,event_user3,,events_conn3_db);
 let $1= 50;
 while ($1)
 {
-  eval CREATE EVENT conn3_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it VALUES("conn3_ev$1", NOW());
+  eval CREATE EVENT conn3_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it1 VALUES("conn3_ev$1", NOW());
   dec $1;
 }
 --enable_query_log
@@ -48,7 +52,7 @@ USE events_conn1_test2;
 let $1= 50;
 while ($1)
 {
-  eval CREATE EVENT conn1_round1_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it VALUES("conn1_round1_ev$1", NOW());
+  eval CREATE EVENT conn1_round1_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it2 VALUES("conn1_round1_ev$1", NOW());
   dec $1;
 }
 --enable_query_log
@@ -65,7 +69,7 @@ USE events_conn1_test3;
 let $1= 50;
 while ($1)
 {
-  eval CREATE EVENT conn1_round2_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it VALUES("conn1_round2_ev$1", NOW());
+  eval CREATE EVENT conn1_round2_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it2 VALUES("conn1_round2_ev$1", NOW());
   dec $1;
 }
 --enable_query_log
@@ -77,7 +81,7 @@ USE events_conn1_test4;
 let $1= 50;
 while ($1)
 {
-  eval CREATE EVENT conn1_round3_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it VALUES("conn1_round3_ev$1", NOW());
+  eval CREATE EVENT conn1_round3_ev$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it3 VALUES("conn1_round3_ev$1", NOW());
   dec $1;
 }
 --enable_query_log
@@ -88,7 +92,7 @@ USE events_conn1_test2;
 let $1= 50;
 while ($1)
 {
-  eval CREATE EVENT ev_round4_drop$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it VALUES("conn1_round4_ev$1", NOW());
+  eval CREATE EVENT ev_round4_drop$1 ON SCHEDULE EVERY 1 SECOND DO INSERT INTO events_test.fill_it3 VALUES("conn1_round4_ev$1", NOW());
   dec $1;
 }
 --enable_query_log
@@ -115,7 +119,9 @@ reap;
 disconnect conn3;
 connection default;
 USE events_test;
-DROP TABLE fill_it;
+DROP TABLE fill_it1;
+DROP TABLE fill_it2;
+DROP TABLE fill_it3;
 --disable_query_log
 DROP USER event_user2@localhost;
 DROP USER event_user3@localhost;
......
@@ -4,11 +4,18 @@
 --disable_warnings
 connection server2;
-drop table if exists t1,t2;
+drop table if exists t1,t2,t3;
 connection server1;
-drop table if exists t1,t2;
+drop table if exists t1,t2,t3;
 --enable_warnings
+# Dummy table create/drop to avoid a race where table is created
+# before event subscription is set up, causing test failure (BUG#20677).
+connection server2;
+CREATE TABLE t3 (dummy INT PRIMARY KEY) ENGINE = NDB;
+connection server1;
+DROP TABLE t3;
 # reset for test
 connection server1;
 reset master;
......
@@ -311,8 +311,10 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
   if (!reopen)
   {
     // allocate memory on ndb share so it can be reused after online alter table
-    share->record[0]= (byte*) alloc_root(&share->mem_root, table->s->rec_buff_length);
-    share->record[1]= (byte*) alloc_root(&share->mem_root, table->s->rec_buff_length);
+    (void)multi_alloc_root(&share->mem_root,
+                           &(share->record[0]), table->s->rec_buff_length,
+                           &(share->record[1]), table->s->rec_buff_length,
+                           NULL);
   }
   {
     my_ptrdiff_t row_offset= share->record[0] - table->record[0];
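
Here the two alloc_root() calls become a single multi_alloc_root() call, so both record images come out of one allocation on the share's mem_root and share one lifetime. A minimal standalone sketch of that pattern follows (plain malloc and made-up names such as alloc_two_records, not MySQL's mysys API):

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct TwoRecords
{
  unsigned char *record0;
  unsigned char *record1;
  void *block;                  // the single underlying allocation
};

// Carve two equally sized record buffers out of one malloc'ed block.
static bool alloc_two_records(TwoRecords *out, size_t rec_buff_length)
{
  unsigned char *block=
    static_cast<unsigned char*>(malloc(2 * rec_buff_length));
  if (block == NULL)
    return false;
  out->block=   block;
  out->record0= block;
  out->record1= block + rec_buff_length;
  return true;
}

int main()
{
  TwoRecords recs;
  size_t rec_buff_length= 64;   // stand-in for table->s->rec_buff_length
  if (alloc_two_records(&recs, rec_buff_length))
  {
    memset(recs.record0, 0, rec_buff_length);
    memset(recs.record1, 0, rec_buff_length);
    printf("both record images live in one block of %zu bytes\n",
           2 * rec_buff_length);
    free(recs.block);           // one free releases both images
  }
  return 0;
}
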
@@ -2159,6 +2161,9 @@ int ndb_add_binlog_index(THD *thd, void *_row)
       break;
     }
+  // Set all fields non-null.
+  if(binlog_index->s->null_bytes > 0)
+    bzero(binlog_index->record[0], binlog_index->s->null_bytes);
   binlog_index->field[0]->store(row.master_log_pos);
   binlog_index->field[1]->store(row.master_log_file,
                                 strlen(row.master_log_file),
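
The added bzero() clears the NULL-bit prefix of binlog_index->record[0] so every column starts out non-NULL before the per-field store() calls; the hunk further down does the same for table->record[0] before the gci row is written. A minimal standalone sketch of that record layout (hypothetical FakeShare and column_is_null helpers, memset in place of bzero):

#include <cstdio>
#include <cstring>

struct FakeShare
{
  unsigned int null_bytes;      // size of the NULL-bit prefix
  unsigned int reclength;       // total record length
};

// True if the NULL bit for column `col` is set in the record image.
static bool column_is_null(const unsigned char *record, unsigned int col)
{
  return (record[col / 8] >> (col % 8)) & 1;
}

int main()
{
  FakeShare share= { 1, 16 };             // 1 NULL byte, 16-byte records
  unsigned char record[16];

  memset(record, 0xFF, sizeof(record));   // pretend the buffer holds garbage
  printf("before: column 0 NULL bit = %d\n", column_is_null(record, 0));

  if (share.null_bytes > 0)               // the added check and bzero()
    memset(record, 0, share.null_bytes);  // every column now reads as not-NULL

  printf("after:  column 0 NULL bit = %d\n", column_is_null(record, 0));
  return 0;
}
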
@@ -3275,6 +3280,13 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
   thd= new THD; /* note that contructor of THD uses DBUG_ */
   THD_CHECK_SENTRY(thd);
+  /* We need to set thd->thread_id before thd->store_globals, or it will
+     set an invalid value for thd->variables.pseudo_thread_id.
+  */
+  pthread_mutex_lock(&LOCK_thread_count);
+  thd->thread_id= thread_id++;
+  pthread_mutex_unlock(&LOCK_thread_count);
   thd->thread_stack= (char*) &thd; /* remember where our stack is */
   if (thd->store_globals())
   {
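
The moved assignment takes thd->thread_id under LOCK_thread_count before thd->store_globals() runs, because store_globals() derives thd->variables.pseudo_thread_id from it; the old assignment is removed in the next hunk. A minimal standalone sketch of that ordering (hypothetical FakeTHD, std::mutex in place of LOCK_thread_count):

#include <cstdio>
#include <mutex>

static std::mutex lock_thread_count;      // stand-in for LOCK_thread_count
static unsigned long next_thread_id= 1;   // stand-in for the global thread_id

struct FakeTHD
{
  unsigned long thread_id;
  unsigned long pseudo_thread_id;

  FakeTHD() : thread_id(0), pseudo_thread_id(0) {}

  // Stand-in for THD::store_globals(): it derives the session's
  // pseudo_thread_id from thread_id, so it must run after the id is set.
  void store_globals()
  {
    pseudo_thread_id= thread_id;
  }
};

int main()
{
  FakeTHD thd;

  // Correct order: assign the id under the mutex first...
  {
    std::lock_guard<std::mutex> guard(lock_thread_count);
    thd.thread_id= next_thread_id++;
  }
  // ...then initialize the state that depends on it.
  thd.store_globals();

  printf("thread_id=%lu pseudo_thread_id=%lu\n",
         thd.thread_id, thd.pseudo_thread_id);
  return 0;
}
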
@@ -3307,7 +3319,6 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
   pthread_detach_this_thread();
   thd->real_id= pthread_self();
   pthread_mutex_lock(&LOCK_thread_count);
-  thd->thread_id= thread_id++;
   threads.append(thd);
   pthread_mutex_unlock(&LOCK_thread_count);
   thd->lex->start_transaction_opt= 0;
@@ -3643,6 +3654,10 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
       injector::transaction::table tbl(table, TRUE);
       int ret= trans.use_table(::server_id, tbl);
       DBUG_ASSERT(ret == 0);
+      // Set all fields non-null.
+      if(table->s->null_bytes > 0)
+        bzero(table->record[0], table->s->null_bytes);
       table->field[0]->store((longlong)::server_id);
       table->field[1]->store((longlong)gci);
       trans.write_row(::server_id,
......
@@ -2506,16 +2506,20 @@ my_size_t THD::pack_row(TABLE *table, MY_BITMAP const* cols, byte *row_data,
   int n_null_bytes= table->s->null_bytes;
   byte *ptr;
   uint i;
-  my_ptrdiff_t const offset= (my_ptrdiff_t) (record - (byte*)
-                                             table->record[0]);
+  my_ptrdiff_t const rec_offset= record - table->record[0];
+  my_ptrdiff_t const def_offset= table->s->default_values - table->record[0];
   memcpy(row_data, record, n_null_bytes);
   ptr= row_data+n_null_bytes;
   for (i= 0 ; (field= *p_field) ; i++, p_field++)
   {
     if (bitmap_is_set(cols,i))
+    {
+      my_ptrdiff_t const offset=
+        field->is_null(rec_offset) ? def_offset : rec_offset;
       ptr= (byte*)field->pack((char *) ptr, field->ptr + offset);
+    }
   }
   return (static_cast<my_size_t>(ptr - row_data));
 }
......
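
THD::pack_row() now picks the source offset per field: rec_offset for fields with data in the current record, def_offset (the default-values image) for fields whose NULL bit is set, so packing never reads uninitialized bytes for a NULL column. A minimal standalone sketch of that selection with fixed-width columns and made-up helpers (not the real Field/TABLE classes):

#include <cstddef>
#include <cstdio>
#include <cstring>

static const unsigned int NCOLS= 3;        // columns in the fake table
static const unsigned int COL_SIZE= 4;     // every column is 4 bytes wide
static const unsigned int NULL_BYTES= 1;   // NULL-bit prefix of a record

// True if column `col` is flagged NULL in the record's NULL-bit prefix.
static bool is_null(const unsigned char *record, unsigned int col)
{
  return (record[col / 8] >> (col % 8)) & 1;
}

// Pack the NULL bits plus the column bytes; NULL columns are copied from
// the default-values image instead of the (possibly uninitialized) record.
static size_t pack_row(unsigned char *out,
                       const unsigned char *record,
                       const unsigned char *defaults)
{
  memcpy(out, record, NULL_BYTES);         // NULL bitmap goes out unchanged
  unsigned char *ptr= out + NULL_BYTES;
  for (unsigned int col= 0; col < NCOLS; col++)
  {
    const unsigned char *src= is_null(record, col) ? defaults : record;
    memcpy(ptr, src + NULL_BYTES + col * COL_SIZE, COL_SIZE);
    ptr+= COL_SIZE;
  }
  return (size_t)(ptr - out);
}

int main()
{
  unsigned char record[NULL_BYTES + NCOLS * COL_SIZE];
  unsigned char defaults[NULL_BYTES + NCOLS * COL_SIZE];
  unsigned char packed[NULL_BYTES + NCOLS * COL_SIZE];

  memset(defaults, 0, sizeof(defaults));   // defaults are well-defined zeros
  memset(record, 0xAA, sizeof(record));    // column bytes may be garbage
  record[0]= 1 << 1;                       // only column 1 is NULL

  size_t len= pack_row(packed, record, defaults);
  printf("packed %zu bytes; NULL column 1 was taken from defaults\n", len);
  return 0;
}
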