Commit 89784874 authored by Rich Prohaska

DB-714 read free replication

parent 3c64729c
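This commit makes row-based replication on a TokuDB slave "read free": with the options below, replicated write/update/delete row events skip the unique-key checks and the old-row point lookups that normally precede each apply. The *_delay options used in the test .opt lines exist only to make skipped work visible as elapsed time. As a minimal sketch (option names taken from the .opt lines and the diff; values are illustrative, not a recommendation), a read-free replica could be configured with:

[mysqld]
read_only = ON
tokudb_rpl_unique_checks = OFF
tokudb_rpl_lookup_rows = OFF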
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
include/diff_tables.inc [master:test.t, slave:test.t]
delete from t where a=2;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;
@tend-@tstart <= 5
1
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
include/diff_tables.inc [master:test.t, slave:test.t]
delete from t where a=2;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart > 5;
@tend-@tstart > 5
1
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
include/diff_tables.inc [master:test.t, slave:test.t]
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;
@tend-@tstart <= 5
1
select * from t;
a b
1 3
2 2
3 5
4 3
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
include/diff_tables.inc [master:test.t, slave:test.t]
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;
@tend-@tstart <= 5
0
select * from t;
a b
1 3
2 2
3 5
4 3
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
include/diff_tables.inc [master:test.t, slave:test.t]
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;
@tend-@tstart <= 5
0
select * from t;
a b
1 3
2 2
3 5
4 3
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, primary key(a)) engine=tokudb;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
include/diff_tables.inc [master:test.t, slave:test.t]
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;
@tend-@tstart <= 5
0
select * from t;
a b
1 3
2 2
3 5
4 3
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=tokudb;
insert into t values (1,0,-1);
insert into t values (2,0,-2),(3,0,-3);
insert into t values (4,0,-4);
include/diff_tables.inc [master:test.t, slave:test.t]
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;
@tend-@tstart <= 5
1
select * from t;
a b c
1 3 -1
2 2 -2
3 5 -3
4 3 -4
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=tokudb;
insert into t values (1,0,-1);
insert into t values (2,0,-2),(3,0,-3);
insert into t values (4,0,-4);
include/diff_tables.inc [master:test.t, slave:test.t]
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5;
@tend-@tstart <= 5
0
select * from t;
a b c
1 3 -1
2 2 -2
3 5 -3
4 3 -4
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
1
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
0
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1,2);
insert into t values (2,3),(3,4);
insert into t values (4,5);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
1
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1,2);
insert into t values (2,3),(3,4);
insert into t values (4,5);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
0
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
# test replicated delete rows log events on a table with a primary key.
# the slave is read only with tokudb rpl row lookups OFF.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# delete a row
connection master;
delete from t where a=2;
select unix_timestamp() into @tstart;
# wait for the delete to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5; # assert no delay in the delete time
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=0 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
# test replicated delete rows log events on a table with a primary key.
# the slave is read only with tokudb rpl row lookups ON.
# this will cause SLOW deletes.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# delete a row
connection master;
delete from t where a=2;
select unix_timestamp() into @tstart;
# wait for the delete to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart > 5; # assert big delay in the delete time
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
# test replicated update rows log events on a table with a primary key.
# the slave is read only with tokudb rpl unique checks and row lookups OFF.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine;
# show create table t;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# update some rows
connection master;
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
# wait for the updates to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5; # assert no delay in the update time
connection slave;
select * from t;
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
# test replicated update rows log events on a table with a primary key.
# the slave is read only with tokudb rpl row lookups ON, so the updates are SLOW.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine;
# show create table t;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# update some rows
connection master;
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
# wait for the updates to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5; # expect a delay in the update time since row lookups are ON
connection slave;
select * from t;
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
# test replicated update rows log events on a table with a primary key.
# the slave is read only with tokudb rpl unique checks ON, so the updates are SLOW.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine;
# show create table t;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# update some rows
connection master;
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
# wait for the updates to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5; # expect a delay in the update time since unique checks are ON
connection slave;
select * from t;
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
# test replicated update rows log events on a table with a primary key.
# the slave is read only with tokudb rpl unique checks and row lookups ON, so the updates are SLOW.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, primary key(a)) engine=$engine;
# show create table t;
insert into t values (1,0);
insert into t values (2,0),(3,0);
insert into t values (4,0);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# update some rows
connection master;
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
# wait for the updates to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5; # expect a delay in the update time since unique checks and row lookups are ON
connection slave;
select * from t;
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=OFF
# test replicated update rows log events on a table with a primary key and a unique secondary key.
# the slave is read only with tokudb rpl unique checks and row lookups OFF.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=$engine;
# show create table t;
insert into t values (1,0,-1);
insert into t values (2,0,-2),(3,0,-3);
insert into t values (4,0,-4);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# update some rows
connection master;
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
# wait for the updates to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5; # assert no delay in the update time
connection slave;
select * from t;
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=OFF --tokudb-rpl-lookup-rows-delay=10000 --tokudb-rpl-lookup-rows=ON
# test replicated update rows log events on a table with a primary key and a unique secondary key.
# the slave is read only with tokudb rpl row lookups ON, so the updates are SLOW.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, c bigint not null, primary key(a), unique key(c)) engine=$engine;
# show create table t;
insert into t values (1,0,-1);
insert into t values (2,0,-2),(3,0,-3);
insert into t values (4,0,-4);
# wait for the inserts to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# update some rows
connection master;
update t set b=b+1 where a=2;
update t set b=b+2 where a=1;
update t set b=b+3 where a=4;
update t set b=b+4 where a=3;
update t set b=b+1 where 1<=a and a<=3;
select unix_timestamp() into @tstart;
# wait for the updates to finish on the slave
connection master;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp() into @tend;
select @tend-@tstart <= 5; # expect a delay in the update time since row lookups are ON
connection slave;
select * from t;
# diff tables
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
# test replicated write rows log events on a table with a primary key.
# the slave is read only with tokudb unique checks disabled.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# insert into t values (5); # test read-only
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=10000 --tokudb-rpl-unique-checks=ON
# test replicated write rows log events on a table with a primary key.
# the slave is read only with tokudb unique checks enabled, so the inserts are SLOW.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# insert into t values (5); # test read-only
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
# test replicated write rows log events on a table with a primary key and a unique secondary key.
# the slave is read only with tokudb unique checks disabled.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1,2);
insert into t values (2,3),(3,4);
insert into t values (4,5);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=ON
# test replicated write rows log events on a table with a primary key and a unique secondary key.
# the slave is read only with tokudb unique checks enabled, so the inserts are SLOW.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, b bigint not null, primary key(a), unique key(b)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1,2);
insert into t values (2,3),(3,4);
insert into t values (4,5);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
@@ -1249,6 +1249,7 @@ ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, t
     tokudb_active_index = MAX_KEY;
     invalidate_icp();
     trx_handler_list.data = this;
+    in_rpl_write_rows = in_rpl_delete_rows = in_rpl_update_rows = false;
     TOKUDB_HANDLER_DBUG_VOID_RETURN;
 }
@@ -3550,12 +3551,27 @@ cleanup:
     return error;
 }
 
+static void maybe_do_unique_checks_delay(THD *thd) {
+    if (thd->slave_thread) {
+        uint64_t delay_ms = THDVAR(thd, rpl_unique_checks_delay);
+        if (delay_ms)
+            usleep(delay_ms * 1000);
+    }
+}
+
+static bool do_unique_checks(THD *thd, bool do_rpl_event) {
+    if (do_rpl_event && thd->slave_thread && opt_readonly && !THDVAR(thd, rpl_unique_checks))
+        return false;
+    else
+        return !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS);
+}
+
 int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
-    int error;
+    int error = 0;
     //
     // first do uniqueness checks
     //
-    if (share->has_unique_keys && !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
+    if (share->has_unique_keys && do_unique_checks(thd, in_rpl_write_rows)) {
         for (uint keynr = 0; keynr < table_share->keys; keynr++) {
             bool is_unique_key = (table->key_info[keynr].flags & HA_NOSAME) || (keynr == primary_key);
             bool is_unique = false;
@@ -3568,13 +3584,18 @@ int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
             if (!is_unique_key) {
                 continue;
             }
+
+            maybe_do_unique_checks_delay(thd);
+
             //
             // if unique key, check uniqueness constraint
             // but, we do not need to check it if the key has a null
             // and we do not need to check it if unique_checks is off
             //
             error = is_val_unique(&is_unique, record, &table->key_info[keynr], keynr, txn);
-            if (error) { goto cleanup; }
+            if (error) {
+                goto cleanup;
+            }
+
             if (!is_unique) {
                 error = DB_KEYEXIST;
                 last_dup_key = keynr;
@@ -3582,7 +3603,6 @@ int ha_tokudb::do_uniqueness_checks(uchar* record, DB_TXN* txn, THD* thd) {
             }
         }
     }
-    error = 0;
 cleanup:
     return error;
 }
@@ -3685,15 +3705,8 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
     tokudb_my_free(tmp_pk_val_data);
 }
 
-//
 // set the put flags for the main dictionary
-//
-void ha_tokudb::set_main_dict_put_flags(
-    THD* thd,
-    bool opt_eligible,
-    uint32_t* put_flags
-    )
-{
+void ha_tokudb::set_main_dict_put_flags(THD* thd, bool opt_eligible, uint32_t* put_flags) {
     uint32_t old_prelock_flags = 0;
     uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     bool in_hot_index = share->num_DBs > curr_num_DBs;
@@ -3713,8 +3726,7 @@ void ha_tokudb::set_main_dict_put_flags(
     {
         *put_flags = old_prelock_flags;
     }
-    else if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)
-             && !is_replace_into(thd) && !is_insert_ignore(thd))
+    else if (!do_unique_checks(thd, in_rpl_write_rows | in_rpl_update_rows) && !is_replace_into(thd) && !is_insert_ignore(thd))
     {
         *put_flags = old_prelock_flags;
     }
@@ -3736,22 +3748,18 @@ void ha_tokudb::set_main_dict_put_flags(
 int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk_val, DB_TXN* txn) {
     int error = 0;
-    uint32_t put_flags = mult_put_flags[primary_key];
-    THD *thd = ha_thd();
     uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
-
     assert(curr_num_DBs == 1);
 
+    uint32_t put_flags = mult_put_flags[primary_key];
+    THD *thd = ha_thd();
     set_main_dict_put_flags(thd, true, &put_flags);
 
-    error = share->file->put(
-        share->file,
-        txn,
-        pk_key,
-        pk_val,
-        put_flags
-        );
+    // for test, make unique checks have a very long duration
+    if ((put_flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
+        maybe_do_unique_checks_delay(thd);
+
+    error = share->file->put(share->file, txn, pk_key, pk_val, put_flags);
+
     if (error) {
         last_dup_key = primary_key;
         goto cleanup;
@@ -3765,14 +3773,18 @@ int ha_tokudb::insert_rows_to_dictionaries_mult(DBT* pk_key, DBT* pk_val, DB_TXN
     int error = 0;
     uint curr_num_DBs = share->num_DBs;
 
     set_main_dict_put_flags(thd, true, &mult_put_flags[primary_key]);
-    uint32_t i, flags = mult_put_flags[primary_key];
+    uint32_t flags = mult_put_flags[primary_key];
+
+    // for test, make unique checks have a very long duration
+    if ((flags & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
+        maybe_do_unique_checks_delay(thd);
 
     // the insert ignore optimization uses DB_NOOVERWRITE_NO_ERROR,
     // which is not allowed with env->put_multiple.
     // we have to insert the rows one by one in this case.
     if (flags & DB_NOOVERWRITE_NO_ERROR) {
         DB * src_db = share->key_file[primary_key];
-        for (i = 0; i < curr_num_DBs; i++) {
+        for (uint32_t i = 0; i < curr_num_DBs; i++) {
             DB * db = share->key_file[i];
             if (i == primary_key) {
                 // if it's the primary key, insert the rows
@@ -3833,7 +3845,7 @@ out:
     // error otherwise
     //
 int ha_tokudb::write_row(uchar * record) {
-    TOKUDB_HANDLER_DBUG_ENTER("");
+    TOKUDB_HANDLER_DBUG_ENTER("%p", record);
     DBT row, prim_key;
     int error;
@@ -3871,10 +3883,7 @@ int ha_tokudb::write_row(uchar * record) {
     if (share->has_auto_inc && record == table->record[0]) {
         tokudb_pthread_mutex_lock(&share->mutex);
         ulonglong curr_auto_inc = retrieve_auto_increment(
-            table->field[share->ai_field_index]->key_type(),
-            field_offset(table->field[share->ai_field_index], table),
-            record
-            );
+            table->field[share->ai_field_index]->key_type(), field_offset(table->field[share->ai_field_index], table), record);
         if (curr_auto_inc > share->last_auto_increment) {
             share->last_auto_increment = curr_auto_inc;
             if (delay_updating_ai_metadata) {
@@ -4042,7 +4051,6 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
     memset((void *) &prim_row, 0, sizeof(prim_row));
     memset((void *) &old_prim_row, 0, sizeof(old_prim_row));
 
     ha_statistic_increment(&SSV::ha_update_count);
-
 #if MYSQL_VERSION_ID < 50600
     if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) {
@@ -4089,7 +4097,6 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
     }
     txn = using_ignore ? sub_trans : transaction;
 
-
     if (hidden_primary_key) {
         memset((void *) &prim_key, 0, sizeof(prim_key));
         prim_key.data = (void *) current_ident;
@@ -4101,10 +4108,8 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
         create_dbt_key_from_table(&old_prim_key, primary_key, primary_key_buff, old_row, &has_null);
     }
 
-    //
     // do uniqueness checks
-    //
-    if (share->has_unique_keys && !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) {
+    if (share->has_unique_keys && do_unique_checks(thd, in_rpl_update_rows)) {
         for (uint keynr = 0; keynr < table_share->keys; keynr++) {
             bool is_unique_key = (table->key_info[keynr].flags & HA_NOSAME) || (keynr == primary_key);
             if (keynr == primary_key && !share->pk_has_string) {
@@ -4145,6 +4150,10 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
     set_main_dict_put_flags(thd, false, &mult_put_flags[primary_key]);
 
+    // for test, make unique checks have a very long duration
+    if ((mult_put_flags[primary_key] & DB_OPFLAGS_MASK) == DB_NOOVERWRITE)
+        maybe_do_unique_checks_delay(thd);
+
     error = db_env->update_multiple(
         db_env,
         share->key_file[primary_key],
@@ -5616,13 +5625,11 @@ DBT *ha_tokudb::get_pos(DBT * to, uchar * pos) {
     DBUG_RETURN(to);
 }
 
-//
 // Retrieves a row with based on the primary key saved in pos
 // Returns:
 //      0 on success
 //      HA_ERR_KEY_NOT_FOUND if not found
 //      error otherwise
-//
 int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
     TOKUDB_HANDLER_DBUG_ENTER("");
     DBT db_pos;
@@ -5635,12 +5642,20 @@ int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
     ha_statistic_increment(&SSV::ha_read_rnd_count);
     tokudb_active_index = MAX_KEY;
 
+    // test rpl slave by inducing a delay before the point query
+    THD *thd = ha_thd();
+    if (thd->slave_thread && (in_rpl_delete_rows || in_rpl_update_rows)) {
+        uint64_t delay_ms = THDVAR(thd, rpl_lookup_rows_delay);
+        if (delay_ms)
+            usleep(delay_ms * 1000);
+    }
+
     info.ha = this;
     info.buf = buf;
     info.keynr = primary_key;
     error = share->file->getf_set(share->file, transaction,
-        get_cursor_isolation_flags(lock.type, ha_thd()),
+        get_cursor_isolation_flags(lock.type, thd),
         key, smart_dbt_callback_rowread_ptquery, &info);
 
     if (error == DB_NOTFOUND) {
@@ -8167,6 +8182,37 @@ void ha_tokudb::remove_from_trx_handler_list() {
     trx->handlers = list_delete(trx->handlers, &trx_handler_list);
 }
 
+void ha_tokudb::rpl_before_write_rows() {
+    in_rpl_write_rows = true;
+}
+
+void ha_tokudb::rpl_after_write_rows() {
+    in_rpl_write_rows = false;
+}
+
+void ha_tokudb::rpl_before_delete_rows() {
+    in_rpl_delete_rows = true;
+}
+
+void ha_tokudb::rpl_after_delete_rows() {
+    in_rpl_delete_rows = false;
+}
+
+void ha_tokudb::rpl_before_update_rows() {
+    in_rpl_update_rows = true;
+}
+
+void ha_tokudb::rpl_after_update_rows() {
+    in_rpl_update_rows = false;
+}
+
+bool ha_tokudb::rpl_lookup_rows() {
+    if (!in_rpl_delete_rows && !in_rpl_update_rows)
+        return true;
+    else
+        return THDVAR(ha_thd(), rpl_lookup_rows);
+}
+
 // table admin
 #include "ha_tokudb_admin.cc"
@@ -799,6 +799,19 @@ private:
 private:
     int do_optimize(THD *thd);
     int map_to_handler_error(int error);
+
+public:
+    void rpl_before_write_rows();
+    void rpl_after_write_rows();
+    void rpl_before_delete_rows();
+    void rpl_after_delete_rows();
+    void rpl_before_update_rows();
+    void rpl_after_update_rows();
+    bool rpl_lookup_rows();
+
+private:
+    bool in_rpl_write_rows;
+    bool in_rpl_delete_rows;
+    bool in_rpl_update_rows;
 };
 
 #if TOKU_INCLUDE_OPTION_STRUCTS
@@ -1442,6 +1442,10 @@ static struct st_mysql_sys_var *tokudb_system_variables[] = {
 #if TOKU_INCLUDE_XA
     MYSQL_SYSVAR(support_xa),
 #endif
+    MYSQL_SYSVAR(rpl_unique_checks),
+    MYSQL_SYSVAR(rpl_unique_checks_delay),
+    MYSQL_SYSVAR(rpl_lookup_rows),
+    MYSQL_SYSVAR(rpl_lookup_rows_delay),
     NULL
 };
@@ -504,17 +504,20 @@ static TYPELIB tokudb_empty_scan_typelib = {
     NULL
 };
 
-static MYSQL_THDVAR_ENUM(empty_scan,
-    PLUGIN_VAR_OPCMDARG,
+static MYSQL_THDVAR_ENUM(empty_scan, PLUGIN_VAR_OPCMDARG,
     "TokuDB algorithm to check if the table is empty when opened. ",
     NULL, NULL, TOKUDB_EMPTY_SCAN_RL, &tokudb_empty_scan_typelib
 );
 
 #if TOKUDB_CHECK_JEMALLOC
 static uint tokudb_check_jemalloc;
-static MYSQL_SYSVAR_UINT(check_jemalloc, tokudb_check_jemalloc, 0, "Check if jemalloc is linked", NULL, NULL, 1, 0, 1, 0);
+static MYSQL_SYSVAR_UINT(check_jemalloc, tokudb_check_jemalloc, 0, "Check if jemalloc is linked",
+    NULL, NULL, 1, 0, 1, 0);
 #endif
 
+static MYSQL_THDVAR_BOOL(bulk_fetch, PLUGIN_VAR_THDLOCAL, "enable bulk fetch",
+    NULL /*check*/, NULL /*update*/, true /*default*/);
+
 #if TOKU_INCLUDE_XA
 static MYSQL_THDVAR_BOOL(support_xa,
     PLUGIN_VAR_OPCMDARG,
@@ -525,7 +528,17 @@ static MYSQL_THDVAR_BOOL(support_xa,
 );
 #endif
 
-static MYSQL_THDVAR_BOOL(bulk_fetch, PLUGIN_VAR_THDLOCAL, "enable bulk fetch", NULL /*check*/, NULL /*update*/, true /*default*/);
+static MYSQL_THDVAR_BOOL(rpl_unique_checks, PLUGIN_VAR_THDLOCAL, "enable unique checks on replication slave",
+    NULL /*check*/, NULL /*update*/, true /*default*/);
+
+static MYSQL_THDVAR_ULONGLONG(rpl_unique_checks_delay, PLUGIN_VAR_THDLOCAL, "time in milliseconds to add to unique checks test on replication slave",
+    NULL, NULL, 0 /*default*/, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/);
+
+static MYSQL_THDVAR_BOOL(rpl_lookup_rows, PLUGIN_VAR_THDLOCAL, "lookup a row on rpl slave",
+    NULL /*check*/, NULL /*update*/, true /*default*/);
+
+static MYSQL_THDVAR_ULONGLONG(rpl_lookup_rows_delay, PLUGIN_VAR_THDLOCAL, "time in milliseconds to add to lookups on replication slave",
+    NULL, NULL, 0 /*default*/, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/);
 
 extern HASH tokudb_open_tables;
 extern pthread_mutex_t tokudb_mutex;
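The four new THDVARs surface as tokudb_rpl_% server/session variables. As a quick sanity check on a replica (this sketch mirrors the commented-out SHOW statements in the tests above; the values reported depend on the slave's startup options):

show variables like 'tokudb_rpl%';
select @@tokudb_rpl_unique_checks, @@tokudb_rpl_lookup_rows;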