Commit 0d13a48c authored by mkindahl@dl145h.mysql.com

Merge dl145h.mysql.com:/data0/mkindahl/mysql-5.1-rpl-merge

into  dl145h.mysql.com:/data0/mkindahl/mysql-5.1-new-rpl
parents b1428496 087c239b
@@ -44,7 +44,8 @@ dist-hook:
$(distdir)/std_data/ndb_backup51 \
$(distdir)/std_data/ndb_backup51_data_be \
$(distdir)/std_data/ndb_backup51_data_le \
-$(distdir)/lib
+$(distdir)/lib \
+$(distdir)/lib/My
-$(INSTALL_DATA) $(srcdir)/t/*.def $(distdir)/t
$(INSTALL_DATA) $(srcdir)/t/*.test $(distdir)/t
-$(INSTALL_DATA) $(srcdir)/t/*.imtest $(distdir)/t
@@ -57,6 +58,7 @@ dist-hook:
-$(INSTALL_DATA) $(srcdir)/extra/binlog_tests/*.opt $(distdir)/extra/binlog_tests
-$(INSTALL_DATA) $(srcdir)/extra/rpl_tests/*.opt $(distdir)/extra/rpl_tests
$(INSTALL_DATA) $(srcdir)/include/*.inc $(distdir)/include
+$(INSTALL_DATA) $(srcdir)/include/*.sql $(distdir)/include
$(INSTALL_DATA) $(srcdir)/include/*.test $(distdir)/include
$(INSTALL_DATA) $(srcdir)/r/*.result $(srcdir)/r/*.require $(distdir)/r
$(INSTALL_DATA) $(srcdir)/std_data/Moscow_leap $(distdir)/std_data
@@ -72,6 +74,7 @@ dist-hook:
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51_data_be/BACKUP* $(distdir)/std_data/ndb_backup51_data_be
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51_data_le/BACKUP* $(distdir)/std_data/ndb_backup51_data_le
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(distdir)/lib
+$(INSTALL_DATA) $(srcdir)/lib/My/*.pm $(distdir)/lib/My
-rm -rf `find $(distdir)/suite -type d -name SCCS` $(distdir)/suite/row_lock
install-data-local:
@@ -86,7 +89,8 @@ install-data-local:
$(DESTDIR)$(testdir)/std_data/ndb_backup51 \
$(DESTDIR)$(testdir)/std_data/ndb_backup51_data_be \
$(DESTDIR)$(testdir)/std_data/ndb_backup51_data_le \
-$(DESTDIR)$(testdir)/lib
+$(DESTDIR)$(testdir)/lib \
+$(DESTDIR)$(testdir)/lib/My
$(INSTALL_DATA) $(srcdir)/README $(DESTDIR)$(testdir)
-$(INSTALL_DATA) $(srcdir)/t/*.def $(DESTDIR)$(testdir)/t
$(INSTALL_DATA) $(srcdir)/t/*.test $(DESTDIR)$(testdir)/t
@@ -103,6 +107,7 @@ install-data-local:
-$(INSTALL_DATA) $(srcdir)/extra/binlog_tests/*.opt $(DESTDIR)$(testdir)/extra/binlog_tests
-$(INSTALL_DATA) $(srcdir)/extra/rpl_tests/*.opt $(DESTDIR)$(testdir)/extra/rpl_tests
$(INSTALL_DATA) $(srcdir)/include/*.inc $(DESTDIR)$(testdir)/include
+$(INSTALL_DATA) $(srcdir)/include/*.sql $(DESTDIR)$(testdir)/include
$(INSTALL_DATA) $(srcdir)/include/*.test $(DESTDIR)$(testdir)/include
$(INSTALL_DATA) $(srcdir)/std_data/*.dat $(DESTDIR)$(testdir)/std_data
$(INSTALL_DATA) $(srcdir)/std_data/*.*001 $(DESTDIR)$(testdir)/std_data
@@ -119,6 +124,7 @@ install-data-local:
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51_data_be/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup51_data_be
$(INSTALL_DATA) $(srcdir)/std_data/ndb_backup51_data_le/BACKUP* $(DESTDIR)$(testdir)/std_data/ndb_backup51_data_le
$(INSTALL_DATA) $(srcdir)/lib/*.pl $(DESTDIR)$(testdir)/lib
+$(INSTALL_DATA) $(srcdir)/lib/My/*.pm $(DESTDIR)$(testdir)/lib/My
for f in `(cd $(srcdir); find suite -type f | egrep -v 'SCCS|row_lock')`; \
do \
d=$(DESTDIR)$(testdir)/`dirname $$f`; \
...
--source include/have_log_bin.inc
-- require r/have_binlog_format_statement.require
--disable_query_log
--replace_result ROW STATEMENT
show variables like "binlog_format";
--enable_query_log
SET GLOBAL BINLOG_FORMAT=MIXED;
SET SESSION BINLOG_FORMAT=MIXED;
SET GLOBAL BINLOG_FORMAT=ROW;
SET SESSION BINLOG_FORMAT=ROW;
SET GLOBAL BINLOG_FORMAT=STATEMENT;
SET SESSION BINLOG_FORMAT=STATEMENT;
# -*- cperl -*-
package My::Config::Option;
use strict;
use warnings;
sub new {
my ($class, $option_name, $option_value)= @_;
my $self= bless { name => $option_name,
value => $option_value
}, $class;
return $self;
}
sub name {
my ($self)= @_;
return $self->{name};
}
sub value {
my ($self)= @_;
return $self->{value};
}
package My::Config::Group;
use strict;
use warnings;
sub new {
my ($class, $group_name)= @_;
my $self= bless { name => $group_name,
options => [],
options_by_name => {},
}, $class;
return $self;
}
sub insert {
my ($self, $option_name, $value, $if_not_exist)= @_;
my $option= $self->option($option_name);
if (defined($option) and !$if_not_exist) {
$option->{value}= $value;
}
else {
my $option= My::Config::Option->new($option_name, $value);
# Insert option in list
push(@{$self->{options}}, $option);
# Insert option in hash
$self->{options_by_name}->{$option_name}= $option;
}
return $option;
}
sub remove {
my ($self, $option_name)= @_;
# Check that option exists
my $option= $self->option($option_name);
return undef unless defined $option;
# Remove from the hash
delete($self->{options_by_name}->{$option_name}) or die;
# Remove from the array
@{$self->{options}}= grep { $_->name ne $option_name } @{$self->{options}};
return $option;
}
sub options {
my ($self)= @_;
return @{$self->{options}};
}
sub name {
my ($self)= @_;
return $self->{name};
}
#
# Return a specific option in the group
#
sub option {
my ($self, $option_name)= @_;
return $self->{options_by_name}->{$option_name};
}
#
# Return a specific value for an option in the group
#
sub value {
my ($self, $option_name)= @_;
my $option= $self->option($option_name);
die "No option named '$option_name' in this group"
if ! defined($option);
return $option->value();
}
package My::Config;
use strict;
use warnings;
use IO::File;
use File::Basename;
#
# Constructor for My::Config
# - represents a my.cnf config file
#
# The parsed config is stored as an array of groups, each holding an array of options
#
sub new {
my ($class, $path)= @_;
my $group_name= undef;
my $self= bless { groups => [] }, $class;
my $F= IO::File->new($path, "<")
or die "Could not open '$path': $!";
while ( my $line= <$F> ) {
chomp($line);
# [group]
if ( $line =~ /\[(.*)\]/ ) {
# New group found
$group_name= $1;
#print "group: $group_name\n";
$self->insert($group_name, undef, undef);
}
# Magic #! comments
elsif ( $line =~ /^#\!/) {
my $magic= $line;
die "Found magic comment '$magic' outside of group"
unless $group_name;
#print "$magic\n";
$self->insert($group_name, $magic, undef);
}
# Comments
elsif ( $line =~ /^#/ || $line =~ /^;/) {
# Skip comment
next;
}
# Empty lines
elsif ( $line =~ /^$/ ) {
# Skip empty lines
next;
}
# !include <filename>
elsif ( $line =~ /^\!include\s*(.*?)\s*$/ ) {
my $include_file_name= dirname($path)."/".$1;
# Check that the file exists
die "The include file '$include_file_name' does not exist"
unless -f $include_file_name;
$self->append(My::Config->new($include_file_name));
}
# <option>
elsif ( $line =~ /^([\@\w-]+)\s*$/ ) {
my $option= $1;
die "Found option '$option' outside of group"
unless $group_name;
#print "$option\n";
$self->insert($group_name, $option, undef);
}
# <option>=<value>
elsif ( $line =~ /^([\@\w-]+)\s*=\s*(.*?)\s*$/ ) {
my $option= $1;
my $value= $2;
die "Found option '$option=$value' outside of group"
unless $group_name;
#print "$option=$value\n";
$self->insert($group_name, $option, $value);
} else {
die "Unexpected line '$line' found in '$path'";
}
}
undef $F; # Close the file
return $self;
}
#
# Insert a new group if it does not already exist
# and add option if defined
#
sub insert {
my ($self, $group_name, $option, $value, $if_not_exist)= @_;
my $group;
# Create empty array for the group if it doesn't exist
if ( !$self->group_exists($group_name) ) {
$group= $self->_group_insert($group_name);
}
else {
$group= $self->group($group_name);
}
if ( defined $option ) {
#print "option: $option, value: $value\n";
# Add the option to the group
$group->insert($option, $value, $if_not_exist);
}
}
#
# Remove an option, given group and option name
#
sub remove {
my ($self, $group_name, $option_name)= @_;
my $group= $self->group($group_name);
die "group '$group_name' does not exist"
unless defined($group);
$group->remove($option_name) or
die "option '$option_name' does not exist";
}
#
# Check if group with given name exists in config
#
sub group_exists {
my ($self, $group_name)= @_;
foreach my $group ($self->groups()) {
return 1 if $group->{name} eq $group_name;
}
return 0;
}
#
# Insert a new group into config
#
sub _group_insert {
my ($self, $group_name)= @_;
caller eq __PACKAGE__ or die;
# Check that group does not already exist
die "Group already exists" if $self->group_exists($group_name);
my $group= My::Config::Group->new($group_name);
push(@{$self->{groups}}, $group);
return $group;
}
#
# Append a configuration to current config
#
sub append {
my ($self, $from)= @_;
foreach my $group ($from->groups()) {
foreach my $option ($group->options()) {
$self->insert($group->name(), $option->name(), $option->value());
}
}
}
#
# Return a list with all the groups in config
#
sub groups {
my ($self)= @_;
return ( @{$self->{groups}} );
}
#
# Return a list of all the groups in config
# starting with the given string
#
sub like {
my ($self, $prefix)= @_;
return ( grep ( $_->{name} =~ /^$prefix/, $self->groups()) );
}
#
# Return the first group in config
# starting with the given string
#
sub first_like {
my ($self, $prefix)= @_;
return ($self->like($prefix))[0];
}
#
# Return a specific group in the config
#
sub group {
my ($self, $group_name)= @_;
foreach my $group ( $self->groups() ) {
return $group if $group->{name} eq $group_name;
}
return undef;
}
#
# Return a list of all options in a specific group in the config
#
sub options_in_group {
my ($self, $group_name)= @_;
my $group= $self->group($group_name);
return () unless defined $group;
return $group->options();
}
#
# Return a value given group and option name
#
sub value {
my ($self, $group_name, $option_name)= @_;
my $group= $self->group($group_name);
die "group '$group_name' does not exist"
unless defined($group);
my $option= $group->option($option_name);
die "option '$option_name' does not exist"
unless defined($option);
return $option->value();
}
#
# Check if an option exists
#
sub exists {
my ($self, $group_name, $option_name)= @_;
my $group= $self->group($group_name);
die "group '$group_name' does not exist"
unless defined($group);
my $option= $group->option($option_name);
return defined($option);
}
# Overload "to string"-operator with 'stringify'
use overload
'""' => \&stringify;
#
# Return the config as a string in my.cnf file format
#
sub stringify {
my ($self)= @_;
my $res;
foreach my $group ($self->groups()) {
$res .= "[$group->{name}]\n";
foreach my $option ($group->options()) {
$res .= $option->name();
my $value= $option->value();
if (defined $value) {
$res .= "=$value";
}
$res .= "\n";
}
$res .= "\n";
}
return $res;
}
#
# Save the config to named file
#
sub save {
my ($self, $path)= @_;
my $F= IO::File->new($path, ">")
or die "Could not open '$path': $!";
print $F $self;
undef $F; # Close the file
}
1;
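For reference, a minimal usage sketch of the My::Config module above. It is not part of the patch; the file path, group names and option values are made-up examples, but the calls (new, value, insert, like, save and the overloaded stringification) follow the code shown here.

#!/usr/bin/perl
# Hypothetical example, not part of the commit: exercise My::Config on a
# small my.cnf-style file.
use strict;
use warnings;
use My::Config;

# Write a tiny config file to parse (the path is an arbitrary example)
my $path= "/tmp/my_config_demo.cnf";
open(my $fh, ">", $path) or die "Cannot write '$path': $!";
print $fh "[mysqld.1]\nport=13000\n\n[client]\nuser=root\n";
close($fh);

my $config= My::Config->new($path);

# Read an option value and add a new one
print "port: ", $config->value('mysqld.1', 'port'), "\n";
$config->insert('mysqld.1', 'binlog-format', 'row');

# List groups whose name starts with a prefix
foreach my $group ($config->like('mysqld')) {
  print "group: ", $group->name(), "\n";
}

# The overloaded "" operator renders my.cnf format; save() writes it back out
print $config;
$config->save($path);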
@@ -22,8 +22,10 @@ use File::Basename;
use IO::File();
use strict;
+use My::Config;
sub collect_test_cases ($);
-sub collect_one_suite ($$);
+sub collect_one_suite ($);
sub collect_one_test_case ($$$$$$$$$);
sub mtr_options_from_test_file($$);
@@ -63,7 +65,7 @@ sub collect_test_cases ($) {
foreach my $suite (split(",", $suites))
{
-collect_one_suite($suite, $cases);
+push(@$cases, collect_one_suite($suite));
}
@@ -207,51 +209,24 @@ sub split_testname {
}
-sub collect_one_suite($$)
+sub collect_one_suite($)
{
my $suite= shift; # Test suite name
-my $cases= shift; # List of test cases
+my @cases; # Array of hash
mtr_verbose("Collecting: $suite");
-my $combination_file= "combinations";
-my $combinations = [];
my $suitedir= "$::glob_mysql_test_dir"; # Default
-my $combination_file= "$::glob_mysql_test_dir/$combination_file";
if ( $suite ne "main" )
{
$suitedir= mtr_path_exists("$suitedir/suite/$suite",
"$suitedir/$suite");
mtr_verbose("suitedir: $suitedir");
-$combination_file= "$suitedir/$combination_file";
}
my $testdir= "$suitedir/t";
my $resdir= "$suitedir/r";
-if (!@::opt_combination)
-{
-# Read combinations file
-if ( open(COMB,$combination_file) )
-{
-while (<COMB>)
-{
-chomp;
-s/\ +/ /g;
-push (@$combinations, $_) unless ($_ eq '');
-}
-close COMB;
-}
-}
-else
-{
-# take the combination from command-line
-@$combinations = @::opt_combination;
-}
-# Remember last element position
-my $begin_index = $#{@$cases} + 1;
# ----------------------------------------------------------------------
# Build a hash of disabled testcases for this suite
# ----------------------------------------------------------------------
@@ -326,7 +301,7 @@ sub collect_one_suite($$)
}
collect_one_test_case($testdir,$resdir,$suite,$tname,
-"$tname.$extension",$cases,\%disabled,
+"$tname.$extension",\@cases,\%disabled,
$component_id,$suite_opts);
}
}
@@ -356,14 +331,19 @@ sub collect_one_suite($$)
next if ($do_test and not $tname =~ /$do_test/o);
collect_one_test_case($testdir,$resdir,$suite,$tname,
-$elem,$cases,\%disabled,$component_id,
+$elem,\@cases,\%disabled,$component_id,
$suite_opts);
}
closedir TESTDIR;
}
+# Return empty list if no testcases found
+return if (@cases == 0);
# ----------------------------------------------------------------------
-# Proccess combinations only if new tests were added
+# Read combinations for this suite and build testcases x combinations
+# if any combinations exists
# ----------------------------------------------------------------------
-if (0 and $combinations && $begin_index <= $#{@$cases})
{
@@ -434,7 +414,84 @@ sub collect_one_suite($$)
}
}
-return $cases;
+optimize_cases(\@cases);
+#print_testcases(@cases);
+return @cases;
+}
#
# Loop through all test cases
# - optimize which test to run by skipping unnecessary ones
# - update settings if necessary
#
sub optimize_cases {
my ($cases)= @_;
foreach my $tinfo ( @$cases )
{
# Skip processing if already marked as skipped
next if $tinfo->{skip};
# Replication test needs an adjustment of binlog format
if (mtr_match_prefix($tinfo->{'name'}, "rpl"))
{
# =======================================================
# Get binlog-format used by this test from master_opt
# =======================================================
my $test_binlog_format;
foreach my $opt ( @{$tinfo->{master_opt}} ) {
$test_binlog_format= $test_binlog_format ||
mtr_match_prefix($opt, "--binlog-format=");
}
# print $tinfo->{name}." uses ".$test_binlog_format."\n";
# =======================================================
# If a special binlog format was selected with
# --mysqld=--binlog-format=x, skip all test with different
# binlog-format
# =======================================================
if (defined $::used_binlog_format and
$test_binlog_format and
$::used_binlog_format ne $test_binlog_format)
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Requires --binlog-format='$test_binlog_format'";
next;
}
# =======================================================
# Check that testcase supports the designated binlog-format
# =======================================================
if ($test_binlog_format and defined $tinfo->{'sup_binlog_formats'} )
{
my $supported=
grep { $_ eq $test_binlog_format } @{$tinfo->{'sup_binlog_formats'}};
if ( !$supported )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}=
"Doesn't support --binlog-format='$test_binlog_format'";
next;
}
}
# =======================================================
# Use dynamic switching of binlog-format if mtr started
# w/o --mysqld=--binlog-format=xxx and combinations.
# =======================================================
if (!defined $tinfo->{'combination'} and
!defined $::used_binlog_format)
{
$test_binlog_format= $tinfo->{'sup_binlog_formats'}->[0];
}
# Save binlog format for dynamic switching
$tinfo->{binlog_format}= $test_binlog_format;
}
}
}
@@ -524,6 +581,7 @@ sub collect_one_test_case($$$$$$$$$) {
$tinfo->{'slave_opt'}= [];
$tinfo->{'slave_mi'}= [];
# Add suite opts
foreach my $opt ( @$suite_opts )
{
@@ -737,14 +795,6 @@ sub collect_one_test_case($$$$$$$$$) {
return;
}
-if ( defined $tinfo->{'binlog_format'} and
-! ( $tinfo->{'binlog_format'} eq $::used_binlog_format ) )
-{
-$tinfo->{'skip'}= 1;
-$tinfo->{'comment'}= "Requiring binlog format '$tinfo->{'binlog_format'}'";
-return;
-}
if ( $tinfo->{'need_debug'} && ! $::debug_compiled_binaries )
{
$tinfo->{'skip'}= 1;
@@ -824,10 +874,17 @@ sub collect_one_test_case($$$$$$$$$) {
our @tags=
(
["include/have_innodb.inc", "innodb_test", 1],
-["include/have_binlog_format_row.inc", "binlog_format", "row"],
+["include/have_binlog_format_row.inc", "sup_binlog_formats", ["row"]],
["include/have_log_bin.inc", "need_binlog", 1],
-["include/have_binlog_format_statement.inc", "binlog_format", "statement"],
-["include/have_binlog_format_mixed.inc", "binlog_format", "mixed"],
+["include/have_binlog_format_statement.inc",
+"sup_binlog_formats", ["statement"]],
+["include/have_binlog_format_mixed.inc", "sup_binlog_formats", ["mixed"]],
+["include/have_binlog_format_mixed_or_row.inc",
+"sup_binlog_formats", ["mixed","row"]],
+["include/have_binlog_format_mixed_or_statement.inc",
+"sup_binlog_formats", ["mixed","statement"]],
+["include/have_binlog_format_row_or_statement.inc",
+"sup_binlog_formats", ["row","statement"]],
["include/big_test.inc", "big_test", 1],
["include/have_debug.inc", "need_debug", 1],
["include/have_ndb.inc", "ndb_test", 1],
@@ -853,8 +910,8 @@ sub mtr_options_from_test_file($$) {
{
if ( index($line, $tag->[0]) >= 0 )
{
# Tag matched, assign value to "tinfo"
$tinfo->{"$tag->[1]"}= $tag->[2];
}
}
@@ -875,8 +932,29 @@ sub mtr_options_from_test_file($$) {
mtr_options_from_test_file($tinfo, $sourced_file);
}
}
}
}
sub print_testcases {
my (@cases)= @_;
print "=" x 60, "\n";
foreach my $test (@cases){
print "[", $test->{name}, "]", "\n";
while ((my ($key, $value)) = each(%$test)) {
print " ", $key, "=";
if (ref $value eq "ARRAY") {
print join(", ", @$value);
} else {
print $value;
}
print "\n";
}
print "\n";
} }
print "=" x 60, "\n";
}
1;
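To make the new bookkeeping concrete, here is a hedged sketch (not taken from the patch) of the per-test hash fields that the tag table and optimize_cases() above read and write; the field names follow the code, while the test name and values are invented:

# Hypothetical shape of one collected test-case hash after tag parsing:
my $tinfo= {
  name               => 'rpl_example',              # invented test name
  master_opt         => ['--binlog-format=row'],    # from the test's -master.opt file, if any
  sup_binlog_formats => ['mixed', 'row'],           # set by the have_binlog_format_*.inc tags
};
# optimize_cases() compares the format requested via master_opt with any format
# forced by --mysqld=--binlog-format=..., skips tests whose sup_binlog_formats
# do not match, and otherwise records $tinfo->{binlog_format}; that value is
# what do_before_run_mysqltest() later uses to switch the running master via
# include/set_binlog_format_<format>.sql.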
@@ -280,4 +280,33 @@ sub mtr_cmp_opts ($$) {
return 0; # They are the same
}
#
# Compare two arrays and put all unequal elements into a new one
#
sub mtr_diff_opts ($$) {
my $l1= shift;
my $l2= shift;
my $f;
my $l= [];
foreach my $e1 (@$l1)
{
$f= undef;
foreach my $e2 (@$l2)
{
$f= 1 unless ($e1 ne $e2);
}
push(@$l, $e1) unless (defined $f);
}
foreach my $e2 (@$l2)
{
$f= undef;
foreach my $e1 (@$l1)
{
$f= 1 unless ($e1 ne $e2);
}
push(@$l, $e2) unless (defined $f);
}
return $l;
}
1;
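A small illustration (not part of the patch) of what mtr_diff_opts() returns and why run_testcase_need_master_restart() can use it to avoid a restart when only the binlog format differs; the option lists are invented:

# Hypothetical inputs: options the master runs with vs. what the next test wants
my $running= [ '--binlog-format=row',       '--loose-skip-innodb' ];
my $wanted=  [ '--binlog-format=statement', '--loose-skip-innodb' ];

# Symmetric difference: elements present in only one of the two lists
my $diff= mtr_diff_opts($running, $wanted);
# $diff is now ['--binlog-format=row', '--binlog-format=statement'], i.e.
# exactly two entries that both match /^--binlog-format=/, so the caller can
# switch the format dynamically instead of restarting the master.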
@@ -50,9 +50,13 @@ my $tot_real_time= 0;
sub mtr_report_test_name ($) {
my $tinfo= shift;
-_mtr_log("$tinfo->{name}");
-printf "%-30s ", $tinfo->{'name'};
+my $tname= $tinfo->{name};
+$tname.= " '$tinfo->{combination}'"
+if defined $tinfo->{combination};
+_mtr_log($tname);
+printf "%-30s ", $tname;
}
sub mtr_report_test_skipped ($) {
...
@@ -52,6 +52,9 @@
# "perl -d:Trace mysql-test-run.pl"
#
+use lib "lib/";
$Devel::Trace::TRACE= 0; # Don't trace boring init stuff
#require 5.6.1;
@@ -164,7 +167,8 @@ our $opt_bench= 0;
our $opt_small_bench= 0;
our $opt_big_test= 0;
-our @opt_combination;
+our @opt_combinations;
+our $opt_skip_combination;
our @opt_extra_mysqld_opt;
@@ -531,7 +535,8 @@ sub command_line_setup () {
'skip-im' => \$opt_skip_im,
'skip-test=s' => \$opt_skip_test,
'big-test' => \$opt_big_test,
-'combination=s' => \@opt_combination,
+'combination=s' => \@opt_combinations,
+'skip-combination' => \$opt_skip_combination,
# Specify ports
'master_port=i' => \$opt_master_myport,
@@ -792,20 +797,23 @@ sub command_line_setup () {
# --------------------------------------------------------------------------
# Find out type of logging that are being used
# --------------------------------------------------------------------------
-# NOTE if the default binlog format is changed, this has to be changed
-$used_binlog_format= "statement";
if (!$opt_extern && $mysql_version_id >= 50100 )
{
-$used_binlog_format= "mixed";
+# Default value for binlog format
foreach my $arg ( @opt_extra_mysqld_opt )
{
if ( $arg =~ /binlog[-_]format=(\S+)/ )
{
$used_binlog_format= $1;
}
}
-mtr_report("Using binlog format '$used_binlog_format'");
+if (defined $used_binlog_format)
+{
+mtr_report("Using binlog format '$used_binlog_format'");
+}
+else
+{
+mtr_report("Using dynamic switching of binlog format");
+}
}
@@ -923,6 +931,10 @@ sub command_line_setup () {
mtr_error("Will not run in record mode without a specific test case");
}
+if ( $opt_record )
+{
+$opt_skip_combination = 1;
+}
# --------------------------------------------------------------------------
# ps protcol flag
@@ -3304,6 +3316,7 @@ sub run_testcase_check_skip_test($)
sub do_before_run_mysqltest($)
{
my $tinfo= shift;
+my $args;
# Remove old files produced by mysqltest
my $base_file= mtr_match_extension($tinfo->{'result_file'},
@@ -3324,6 +3337,28 @@ sub do_before_run_mysqltest($)
# if script decided to run mysqltest cluster _is_ installed ok
$ENV{'NDB_STATUS_OK'} = "YES";
}
if (defined $tinfo->{binlog_format} and $mysql_version_id > 50100 )
{
# Dynamically switch binlog format of
# master, slave is always restarted
foreach my $server ( @$master )
{
next unless ($server->{'pid'});
mtr_init_args(\$args);
mtr_add_arg($args, "--no-defaults");
mtr_add_arg($args, "--user=root");
mtr_add_arg($args, "--port=$server->{'port'}");
mtr_add_arg($args, "--socket=$server->{'path_sock'}");
my $sql= "include/set_binlog_format_".$tinfo->{binlog_format}.".sql";
mtr_verbose("Setting binlog format:", $tinfo->{binlog_format});
if (mtr_run($exe_mysql, $args, $sql, "", "", "") != 0)
{
mtr_error("Failed to switch binlog format");
}
}
}
}
}
@@ -3755,12 +3790,11 @@ sub mysqld_arguments ($$$$) {
mtr_add_arg($args, "%s--connect-timeout=60", $prefix);
# When mysqld is run by a root user(euid is 0), it will fail
-# to start unless we specify what user to run as. If not running
-# as root it will be ignored, see BUG#30630
+# to start unless we specify what user to run as, see BUG#30630
my $euid= $>;
if (!$glob_win32 and $euid == 0 and
-grep(/^--user/, @$extra_opt, @opt_extra_mysqld_opt) == 0) {
+(grep(/^--user/, @$extra_opt, @opt_extra_mysqld_opt)) == 0) {
-mtr_add_arg($args, "%s--user=root");
+mtr_add_arg($args, "%s--user=root", $prefix);
}
if ( $opt_valgrind_mysqld )
@@ -3867,7 +3901,7 @@ sub mysqld_arguments ($$$$) {
my $slave_load_path= "../tmp";
mtr_add_arg($args, "%s--slave-load-tmpdir=%s", $prefix,
$slave_load_path);
-mtr_add_arg($args, "%s--set-variable=slave_net_timeout=10", $prefix);
+mtr_add_arg($args, "%s--set-variable=slave_net_timeout=120", $prefix);
if ( @$slave_master_info )
{
@@ -4220,10 +4254,19 @@ sub run_testcase_need_master_restart($)
elsif (! mtr_same_opts($master->[0]->{'start_opts'},
$tinfo->{'master_opt'}) )
{
-$do_restart= 1;
-mtr_verbose("Restart master: running with different options '" .
-join(" ", @{$tinfo->{'master_opt'}}) . "' != '" .
-join(" ", @{$master->[0]->{'start_opts'}}) . "'" );
+# Chech that diff is binlog format only
+my $diff_opts= mtr_diff_opts($master->[0]->{'start_opts'},$tinfo->{'master_opt'});
+if (scalar(@$diff_opts) eq 2)
+{
+$do_restart= 1 unless ($diff_opts->[0] =~/^--binlog-format=/ and $diff_opts->[1] =~/^--binlog-format=/);
+}
+else
+{
+$do_restart= 1;
+mtr_verbose("Restart master: running with different options '" .
+join(" ", @{$tinfo->{'master_opt'}}) . "' != '" .
+join(" ", @{$master->[0]->{'start_opts'}}) . "'" );
+}
}
elsif( ! $master->[0]->{'pid'} )
{
@@ -5146,8 +5189,9 @@ Options to control what test suites or cases to run
skip-im Don't start IM, and skip the IM test cases
big-test Set the environment variable BIG_TEST, which can be
checked from test cases.
combination="ARG1 .. ARG2" Specify a set of "mysqld" arguments for one
combination.
+skip-combination Skip any combination options and combinations files
Options that specify ports
...
set autocommit=1;
reset master;
create table bug16206 (a int);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
show binlog events;
Log_name Pos Event_type Server_id End_log_pos Info
f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
f n Query 1 n use `test`; create table bug16206 (a int)
f n Query 1 n use `test`; insert into bug16206 values(1)
f n Query 1 n use `test`; insert into bug16206 values(2)
drop table bug16206;
reset master;
create table bug16206 (a int) engine= bdb;
insert into bug16206 values(0);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
insert into bug16206 values(3);
show binlog events;
Log_name Pos Event_type Server_id End_log_pos Info
f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
f n Query 1 n use `test`; create table bug16206 (a int) engine= bdb
f n Query 1 n use `test`; insert into bug16206 values(0)
f n Query 1 n use `test`; insert into bug16206 values(1)
f n Query 1 n use `test`; BEGIN
f n Query 1 n use `test`; insert into bug16206 values(2)
f n Query 1 n use `test`; COMMIT
f n Query 1 n use `test`; insert into bug16206 values(3)
drop table bug16206;
set autocommit=0;
End of 5.0 tests
@@ -352,4 +352,29 @@ a b
1 root@localhost
DROP DATABASE mysqltest1;
DROP USER untrusted@localhost;
BUG#32580: mysqlbinlog cannot read binlog event with user variables
USE test;
SET BINLOG_FORMAT = STATEMENT;
FLUSH LOGS;
CREATE TABLE t1 (a_real FLOAT, an_int INT, a_decimal DECIMAL(5,2), a_string CHAR(32));
SET @a_real = rand(20) * 1000;
SET @an_int = 1000;
SET @a_decimal = CAST(rand(19) * 999 AS DECIMAL(5,2));
SET @a_string = 'Just a test';
INSERT INTO t1 VALUES (@a_real, @an_int, @a_decimal, @a_string);
FLUSH LOGS;
SELECT * FROM t1;
a_real 158.883
an_int 1000
a_decimal 907.79
a_string Just a test
DROP TABLE t1;
>> mysqlbinlog var/log/master-bin.000019 > var/tmp/bug32580.sql
>> mysql test < var/tmp/bug32580.sql
SELECT * FROM t1;
a_real 158.883
an_int 1000
a_decimal 907.79
a_string Just a test
DROP TABLE t1;
End of 5.1 tests
[row]
--binlog-format=row
[stmt]
--binlog-format=statement
[mix]
--binlog-format=mixed
@@ -116,23 +116,23 @@ t12
t13
t2
t3
-SELECT table_name FROM information_schema.views WHERE table_schema='test';
+SELECT table_name FROM information_schema.views WHERE table_schema='test' ORDER BY table_name;
table_name
v1
v11
-SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test';
+SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test' ORDER BY trigger_name;
trigger_name event_manipulation event_object_table
-t1_tr1 INSERT t1
-t1_tr2 UPDATE t1
t11_tr1 INSERT t11
t11_tr2 UPDATE t11
-SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test';
+t1_tr1 INSERT t1
+t1_tr2 UPDATE t1
+SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test' ORDER BY routine_name;
routine_type routine_name
FUNCTION f1
FUNCTION f2
PROCEDURE p1
PROCEDURE p11
-SELECT event_name, status FROM information_schema.events WHERE event_schema='test';
+SELECT event_name, status FROM information_schema.events WHERE event_schema='test' ORDER BY event_name;
event_name status
e1 DISABLED
e11 DISABLED
@@ -276,23 +276,23 @@ t12
t13
t2
t3
-SELECT table_name FROM information_schema.views WHERE table_schema='test';
+SELECT table_name FROM information_schema.views WHERE table_schema='test' ORDER BY table_name;
table_name
v1
v11
-SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test';
+SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test' ORDER BY trigger_name;
trigger_name event_manipulation event_object_table
-t1_tr1 INSERT t1
-t1_tr2 UPDATE t1
t11_tr1 INSERT t11
t11_tr2 UPDATE t11
-SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test';
+t1_tr1 INSERT t1
+t1_tr2 UPDATE t1
+SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test' ORDER BY routine_name;
routine_type routine_name
FUNCTION f1
FUNCTION f2
PROCEDURE p1
PROCEDURE p11
-SELECT event_name, status FROM information_schema.events WHERE event_schema='test';
+SELECT event_name, status FROM information_schema.events WHERE event_schema='test' ORDER BY event_name;
event_name status
e1 SLAVESIDE_DISABLED
e11 SLAVESIDE_DISABLED
...
@@ -202,10 +202,10 @@ SET GLOBAL EVENT_SCHEDULER = off;
# Check original objects
--echo
SHOW TABLES LIKE 't%';
-SELECT table_name FROM information_schema.views WHERE table_schema='test';
+SELECT table_name FROM information_schema.views WHERE table_schema='test' ORDER BY table_name;
-SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test';
+SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test' ORDER BY trigger_name;
-SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test';
+SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test' ORDER BY routine_name;
-SELECT event_name, status FROM information_schema.events WHERE event_schema='test';
+SELECT event_name, status FROM information_schema.events WHERE event_schema='test' ORDER BY event_name;
# Check original data
--echo
@@ -229,10 +229,10 @@ SELECT a,b FROM v11 ORDER BY a;
# Check replicated objects
--echo
SHOW TABLES LIKE 't%';
-SELECT table_name FROM information_schema.views WHERE table_schema='test';
+SELECT table_name FROM information_schema.views WHERE table_schema='test' ORDER BY table_name;
-SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test';
+SELECT trigger_name, event_manipulation, event_object_table FROM information_schema.triggers WHERE trigger_schema='test' ORDER BY trigger_name;
-SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test';
+SELECT routine_type, routine_name FROM information_schema.routines WHERE routine_schema='test' ORDER BY routine_name;
-SELECT event_name, status FROM information_schema.events WHERE event_schema='test';
+SELECT event_name, status FROM information_schema.events WHERE event_schema='test' ORDER BY event_name;
# Check replicated data
--echo
...
@@ -109,6 +109,7 @@ DROP TABLE t1, t1_slave;
DROP PROCEDURE test_replication_sp1;
DROP PROCEDURE test_replication_sp2;
DROP FUNCTION test_replication_sf;
+--remove_file $MYSQLTEST_VARDIR/master-data/test/rpl_misc_functions.outfile
--sync_slave_with_master
@@ -51,8 +51,9 @@ CREATE TABLE t1 (
`data` varchar(100),
PRIMARY KEY (`id`)
) ENGINE=MyISAM;
+--disable_warnings
INSERT INTO t1(data) VALUES(SESSION_USER());
+--enable_warnings
save_master_pos;
connection slave;
sync_with_master;
...
-- source include/not_embedded.inc
-- source include/have_bdb.inc
#
# Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode
#
set autocommit=1;
let $VERSION=`select version()`;
reset master;
create table bug16206 (a int);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
--replace_result $VERSION VERSION
--replace_column 1 f 2 n 5 n
show binlog events;
drop table bug16206;
reset master;
create table bug16206 (a int) engine= bdb;
insert into bug16206 values(0);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
insert into bug16206 values(3);
--replace_result $VERSION VERSION
--replace_column 1 f 2 n 5 n
show binlog events;
drop table bug16206;
set autocommit=0;
--echo End of 5.0 tests
@@ -282,4 +282,31 @@ connection default;
DROP DATABASE mysqltest1;
DROP USER untrusted@localhost;
--echo BUG#32580: mysqlbinlog cannot read binlog event with user variables
# Testing that various kinds of events can be read and restored properly.
connection default;
USE test;
SET BINLOG_FORMAT = STATEMENT;
FLUSH LOGS;
CREATE TABLE t1 (a_real FLOAT, an_int INT, a_decimal DECIMAL(5,2), a_string CHAR(32));
SET @a_real = rand(20) * 1000;
SET @an_int = 1000;
SET @a_decimal = CAST(rand(19) * 999 AS DECIMAL(5,2));
SET @a_string = 'Just a test';
INSERT INTO t1 VALUES (@a_real, @an_int, @a_decimal, @a_string);
FLUSH LOGS;
query_vertical SELECT * FROM t1;
DROP TABLE t1;
echo >> mysqlbinlog var/log/master-bin.000019 > var/tmp/bug32580.sql;
exec $MYSQL_BINLOG $MYSQLTEST_VARDIR/log/master-bin.000019 > $MYSQLTEST_VARDIR/tmp/bug32580.sql;
echo >> mysql test < var/tmp/bug32580.sql;
exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/bug32580.sql;
remove_file $MYSQLTEST_VARDIR/tmp/bug32580.sql;
query_vertical SELECT * FROM t1;
DROP TABLE t1;
--echo End of 5.1 tests
@@ -396,6 +396,7 @@ copyfileto $BASE/mysql-test \
$CP mysql-test/lib/*.pl $BASE/mysql-test/lib
$CP mysql-test/t/*.def $BASE/mysql-test/t
$CP mysql-test/include/*.inc $BASE/mysql-test/include
+$CP mysql-test/include/*.sql $BASE/mysql-test/include
$CP mysql-test/include/*.test $BASE/mysql-test/include
$CP mysql-test/t/*.def $BASE/mysql-test/t
$CP mysql-test/std_data/*.dat mysql-test/std_data/*.frm \
...
@@ -36,6 +36,16 @@
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
/*
Size of buffer for printing a double in format %.<PREC>g
optional '-' + optional zero + '.' + PREC digits + 'e' + sign +
exponent digits + '\0'
*/
#define FMT_G_BUFSIZE(PREC) (3 + (PREC) + 5 + 1)
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) && !defined(DBUG_OFF) && !defined(_lint)
static const char *HA_ERR(int i)
{
@@ -4508,8 +4518,10 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
switch (type) {
case REAL_RESULT:
double real_val;
+char real_buf[FMT_G_BUFSIZE(14)];
float8get(real_val, val);
-my_b_printf(&cache, ":=%.14g%s\n", real_val, print_event_info->delimiter);
+my_sprintf(real_buf, (real_buf, "%.14g", real_val));
+my_b_printf(&cache, ":=%s%s\n", real_buf, print_event_info->delimiter);
break;
case INT_RESULT:
char int_buf[22];
...
@@ -11,9 +11,9 @@
// Old implementation of do_apply_event()
int
-Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli)
+Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info *rli)
{
-DBUG_ENTER("Rows_log_event::do_apply_event(st_relay_log_info*)");
+DBUG_ENTER("Old_rows_log_event::do_apply_event(st_relay_log_info*)");
int error= 0;
THD *thd= ev->thd;
uchar const *row_start= ev->m_rows_buf;
@@ -30,7 +30,7 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
This one is supposed to be set: just an extra check so that
nothing strange has happened.
*/
-DBUG_ASSERT(ev->get_flags(Rows_log_event::STMT_END_F));
+DBUG_ASSERT(ev->get_flags(Old_rows_log_event::STMT_END_F));
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
close_thread_tables(thd);
@@ -148,7 +148,7 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
thd->lock= 0;
thd->is_slave_error= 1;
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
-DBUG_RETURN(Rows_log_event::ERR_BAD_TABLE_DEF);
+DBUG_RETURN(Old_rows_log_event::ERR_BAD_TABLE_DEF);
}
}
}
@@ -163,8 +163,8 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
TODO [/Matz]: Maybe the query cache should not be invalidated
here? It might be that a table is not changed, even though it
was locked for the statement. We do know that each
-Rows_log_event contain at least one row, so after processing one
-Rows_log_event, we can invalidate the query cache for the
+Old_rows_log_event contain at least one row, so after processing one
+Old_rows_log_event, we can invalidate the query cache for the
associated table.
*/
for (TABLE_LIST *ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global)
@@ -200,12 +200,12 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
Make sure to set/clear them before executing the main body of
the event.
*/
-if (ev->get_flags(Rows_log_event::NO_FOREIGN_KEY_CHECKS_F))
+if (ev->get_flags(Old_rows_log_event::NO_FOREIGN_KEY_CHECKS_F))
thd->options|= OPTION_NO_FOREIGN_KEY_CHECKS;
else
thd->options&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
-if (ev->get_flags(Rows_log_event::RELAXED_UNIQUE_CHECKS_F))
+if (ev->get_flags(Old_rows_log_event::RELAXED_UNIQUE_CHECKS_F))
thd->options|= OPTION_RELAXED_UNIQUE_CHECKS;
else
thd->options&= ~OPTION_RELAXED_UNIQUE_CHECKS;
@@ -275,7 +275,7 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
We need to delay this clear until the table def is no longer needed.
The table def is needed in unpack_row().
*/
-if (rli->tables_to_lock && ev->get_flags(Rows_log_event::STMT_END_F))
+if (rli->tables_to_lock && ev->get_flags(Old_rows_log_event::STMT_END_F))
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
if (error)
@@ -311,7 +311,7 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
*/
if (table && (table->s->primary_key == MAX_KEY) &&
!ev->cache_stmt &&
-ev->get_flags(Rows_log_event::STMT_END_F) == Rows_log_event::RLE_NO_FLAGS)
+ev->get_flags(Old_rows_log_event::STMT_END_F) == Old_rows_log_event::RLE_NO_FLAGS)
{
/*
------------ Temporary fix until WL#2975 is implemented ---------
@@ -323,7 +323,7 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
present, and idempotency is not guaranteed (no PK) so we risk
that repeating leads to double insert. So we desperately try to
continue, hope we'll eventually leave this buggy situation (by
-executing the final Rows_log_event). If we are in a hopeless
+executing the final Old_rows_log_event). If we are in a hopeless
wait (reached end of last relay log and nothing gets appended
there), we timeout after one minute, and notify DBA about the
problem. When WL#2975 is implemented, just remove the member
@@ -336,6 +336,7 @@ Old_rows_log_event::do_apply_event(Rows_log_event *ev, const Relay_log_info *rli
}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
/*
@@ -350,6 +351,7 @@ last_uniq_key(TABLE *table, uint keyno)
return 1;
}
/*
Compares table->record[0] and table->record[1]
@@ -428,6 +430,7 @@ record_compare_exit:
return result;
}
/*
Copy "extra" columns from record[1] to record[0].
@@ -516,6 +519,7 @@ copy_extra_record_fields(TABLE *table,
DBUG_RETURN(0); // All OK
}
/*
Replace the provided record in the database.
@@ -668,6 +672,7 @@ replace_record(THD *thd, TABLE *table,
DBUG_RETURN(error);
}
/**
Find the row given by 'key', if the table has keys, or else use a table scan
to find (and fetch) the row.
@@ -879,6 +884,7 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
DBUG_RETURN(0);
}
/**********************************************************
Row handling primitives for Write_rows_log_event_old
**********************************************************/
@@ -944,6 +950,7 @@ int Write_rows_log_event_old::do_before_row_operations(TABLE *table)
return error;
}
int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
{
int local_error= 0;
@@ -962,6 +969,7 @@ int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
return error? error : local_error;
}
int
Write_rows_log_event_old::do_prepare_row(THD *thd_arg,
Relay_log_info const *rli,
@@ -981,6 +989,7 @@ Write_rows_log_event_old::do_prepare_row(THD *thd_arg,
return error;
}
int Write_rows_log_event_old::do_exec_row(TABLE *table)
{
DBUG_ASSERT(table != NULL);
@@ -988,6 +997,7 @@ int Write_rows_log_event_old::do_exec_row(TABLE *table)
return error;
}
/**********************************************************
Row handling primitives for Delete_rows_log_event_old
**********************************************************/
@@ -1029,6 +1039,7 @@ int Delete_rows_log_event_old::do_before_row_operations(TABLE *table)
return error;
}
int Delete_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
{
/*error= ToDo:find out what this should really be, this triggers close_scan in nbd, returning error?*/
@@ -1041,6 +1052,7 @@ int Delete_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
return error;
}
int
Delete_rows_log_event_old::do_prepare_row(THD *thd_arg,
Relay_log_info const *rli,
@@ -1074,6 +1086,7 @@ Delete_rows_log_event_old::do_prepare_row(THD *thd_arg,
return error;
}
int Delete_rows_log_event_old::do_exec_row(TABLE *table)
{
int error;
@@ -1091,6 +1104,7 @@ int Delete_rows_log_event_old::do_exec_row(TABLE *table)
return error;
}
/**********************************************************
Row handling primitives for Update_rows_log_event_old
**********************************************************/
@@ -1124,6 +1138,7 @@ int Update_rows_log_event_old::do_before_row_operations(TABLE *table)
return error;
}
int Update_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
{
/*error= ToDo:find out what this should really be, this triggers close_scan in nbd, returning error?*/
@@ -1136,6 +1151,7 @@ int Update_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
return error;
}
int Update_rows_log_event_old::do_prepare_row(THD *thd_arg,
Relay_log_info const *rli,
TABLE *table,
@@ -1179,6 +1195,7 @@ int Update_rows_log_event_old::do_prepare_row(THD *thd_arg,
return error;
}
int Update_rows_log_event_old::do_exec_row(TABLE *table)
{
DBUG_ASSERT(table != NULL);
@@ -1217,3 +1234,1745 @@ int Update_rows_log_event_old::do_exec_row(TABLE *table)
}
#endif
/**************************************************************************
Rows_log_event member functions
**************************************************************************/
#ifndef MYSQL_CLIENT
Old_rows_log_event::Old_rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
MY_BITMAP const *cols,
bool is_transactional)
: Log_event(thd_arg, 0, is_transactional),
m_row_count(0),
m_table(tbl_arg),
m_table_id(tid),
m_width(tbl_arg ? tbl_arg->s->fields : 1),
m_rows_buf(0), m_rows_cur(0), m_rows_end(0), m_flags(0)
#ifdef HAVE_REPLICATION
, m_curr_row(NULL), m_curr_row_end(NULL), m_key(NULL)
#endif
{
// This constructor should not be reached.
assert(0);
/*
We allow a special form of dummy event when the table, and cols
are null and the table id is ~0UL. This is a temporary
solution, to be able to terminate a started statement in the
binary log: the extraneous events will be removed in the future.
*/
DBUG_ASSERT(tbl_arg && tbl_arg->s && tid != ~0UL ||
!tbl_arg && !cols && tid == ~0UL);
if (thd_arg->options & OPTION_NO_FOREIGN_KEY_CHECKS)
set_flags(NO_FOREIGN_KEY_CHECKS_F);
if (thd_arg->options & OPTION_RELAXED_UNIQUE_CHECKS)
set_flags(RELAXED_UNIQUE_CHECKS_F);
/* if bitmap_init fails, caught in is_valid() */
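/*
m_bitbuf is a small buffer embedded in the event object (128 bits; see
its declaration in the class). Tables with at most that many columns use
it directly for m_cols; for wider tables a NULL buffer is passed and
bitmap_init() allocates the bitmap on the heap.
*/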
if (likely(!bitmap_init(&m_cols,
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
m_width,
false)))
{
/* Cols can be zero if this is a dummy binrows event */
if (likely(cols != NULL))
{
memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols));
create_last_word_mask(&m_cols);
}
}
else
{
// Needed because bitmap_init() does not set it to null on failure
m_cols.bitmap= 0;
}
}
#endif
Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len,
Log_event_type event_type,
const Format_description_log_event
*description_event)
: Log_event(buf, description_event),
m_row_count(0),
#ifndef MYSQL_CLIENT
m_table(NULL),
#endif
m_table_id(0), m_rows_buf(0), m_rows_cur(0), m_rows_end(0)
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
, m_curr_row(NULL), m_curr_row_end(NULL), m_key(NULL)
#endif
{
DBUG_ENTER("Old_rows_log_event::Old_Rows_log_event(const char*,...)");
uint8 const common_header_len= description_event->common_header_len;
uint8 const post_header_len= description_event->post_header_len[event_type-1];
DBUG_PRINT("enter",("event_len: %u common_header_len: %d "
"post_header_len: %d",
event_len, common_header_len,
post_header_len));
const char *post_start= buf + common_header_len;
post_start+= RW_MAPID_OFFSET;
if (post_header_len == 6)
{
/* Master is of an intermediate source tree before 5.1.4. Id is 4 bytes */
m_table_id= uint4korr(post_start);
post_start+= 4;
}
else
{
m_table_id= (ulong) uint6korr(post_start);
post_start+= RW_FLAGS_OFFSET;
}
m_flags= uint2korr(post_start);
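/*
Layout being parsed here, after the common header: the post-header holds
a 6-byte table id (4 bytes for pre-5.1.4 masters) followed by a 2-byte
flags field; the variable part holds the packed column count (m_width),
the column bitmap, for PRE_GA_UPDATE_ROWS_EVENT a second bitmap for the
after image, and finally the packed row data.
*/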
uchar const *const var_start=
(const uchar *)buf + common_header_len + post_header_len;
uchar const *const ptr_width= var_start;
uchar *ptr_after_width= (uchar*) ptr_width;
DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
m_width = net_field_length(&ptr_after_width);
DBUG_PRINT("debug", ("m_width=%lu", m_width));
/* if bitmap_init fails, caught in is_valid() */
if (likely(!bitmap_init(&m_cols,
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
m_width,
false)))
{
DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
memcpy(m_cols.bitmap, ptr_after_width, (m_width + 7) / 8);
create_last_word_mask(&m_cols);
ptr_after_width+= (m_width + 7) / 8;
DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols));
}
else
{
// Needed because bitmap_init() does not set it to null on failure
m_cols.bitmap= NULL;
DBUG_VOID_RETURN;
}
m_cols_ai.bitmap= m_cols.bitmap; /* See explanation in is_valid() */
if (event_type == PRE_GA_UPDATE_ROWS_EVENT)
{
DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
/* if bitmap_init fails, caught in is_valid() */
if (likely(!bitmap_init(&m_cols_ai,
m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL,
m_width,
false)))
{
DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
memcpy(m_cols_ai.bitmap, ptr_after_width, (m_width + 7) / 8);
create_last_word_mask(&m_cols_ai);
ptr_after_width+= (m_width + 7) / 8;
DBUG_DUMP("m_cols_ai", (uchar*) m_cols_ai.bitmap,
no_bytes_in_map(&m_cols_ai));
}
else
{
// Needed because bitmap_init() does not set it to null on failure
m_cols_ai.bitmap= 0;
DBUG_VOID_RETURN;
}
}
const uchar* const ptr_rows_data= (const uchar*) ptr_after_width;
size_t const data_size= event_len - (ptr_rows_data - (const uchar *) buf);
DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %lu",
m_table_id, m_flags, m_width, (ulong) data_size));
m_rows_buf= (uchar*) my_malloc(data_size, MYF(MY_WME));
if (likely((bool)m_rows_buf))
{
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
m_curr_row= m_rows_buf;
#endif
m_rows_end= m_rows_buf + data_size;
m_rows_cur= m_rows_end;
memcpy(m_rows_buf, ptr_rows_data, data_size);
}
else
m_cols.bitmap= 0; // to not free it
DBUG_VOID_RETURN;
}
Old_rows_log_event::~Old_rows_log_event()
{
if (m_cols.bitmap == m_bitbuf) // no my_malloc happened
m_cols.bitmap= 0; // so no my_free in bitmap_free
bitmap_free(&m_cols); // To pair with bitmap_init().
my_free((uchar*)m_rows_buf, MYF(MY_ALLOW_ZERO_PTR));
}
int Old_rows_log_event::get_data_size()
{
int const type_code= get_type_code();
uchar buf[sizeof(m_width)+1];
uchar *end= net_store_length(buf, (m_width + 7) / 8);
DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
return 6 + no_bytes_in_map(&m_cols) + (end - buf) +
(type_code == PRE_GA_UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) +
(m_rows_cur - m_rows_buf););
int data_size= ROWS_HEADER_LEN;
data_size+= no_bytes_in_map(&m_cols);
data_size+= end - buf;
if (type_code == PRE_GA_UPDATE_ROWS_EVENT)
data_size+= no_bytes_in_map(&m_cols_ai);
data_size+= (m_rows_cur - m_rows_buf);
return data_size;
}
#ifndef MYSQL_CLIENT
int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length)
{
/*
When the table has a primary key, we would probably want, by default, to
log only the primary key value instead of the entire "before image". This
would save binlog space. TODO
*/
DBUG_ENTER("Old_rows_log_event::do_add_row_data");
DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
(ulong) length));
/*
Don't print debug messages when running valgrind since they can
trigger false warnings.
*/
#ifndef HAVE_purify
DBUG_DUMP("row_data", row_data, min(length, 32));
#endif
DBUG_ASSERT(m_rows_buf <= m_rows_cur);
DBUG_ASSERT(!m_rows_buf || m_rows_end && m_rows_buf < m_rows_end);
DBUG_ASSERT(m_rows_cur <= m_rows_end);
/* The cast will always work since m_rows_cur <= m_rows_end */
if (static_cast<size_t>(m_rows_end - m_rows_cur) <= length)
{
size_t const block_size= 1024;
my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf;
my_ptrdiff_t const new_alloc=
block_size * ((cur_size + length + block_size - 1) / block_size);
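/*
The new allocation is rounded up to a whole number of 1024-byte blocks.
For example, with cur_size == 1500 and length == 700 this gives
1024 * ((1500 + 700 + 1023) / 1024) == 1024 * 3 == 3072 bytes.
*/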
uchar* const new_buf= (uchar*)my_realloc((uchar*)m_rows_buf, (uint) new_alloc,
MYF(MY_ALLOW_ZERO_PTR|MY_WME));
if (unlikely(!new_buf))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/* If the memory moved, we need to move the pointers */
if (new_buf != m_rows_buf)
{
m_rows_buf= new_buf;
m_rows_cur= m_rows_buf + cur_size;
}
/*
The end pointer should always be changed to point to the end of
the allocated memory.
*/
m_rows_end= m_rows_buf + new_alloc;
}
DBUG_ASSERT(m_rows_cur + length <= m_rows_end);
memcpy(m_rows_cur, row_data, length);
m_rows_cur+= length;
m_row_count++;
DBUG_RETURN(0);
}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
int Old_rows_log_event::do_apply_event(Relay_log_info const *rli)
{
DBUG_ENTER("Old_rows_log_event::do_apply_event(Relay_log_info*)");
int error= 0;
/*
If m_table_id == ~0UL, then we have a dummy event that does not
contain any data. In that case, we just remove all tables in the
tables_to_lock list, close the thread tables, and return with
success.
*/
if (m_table_id == ~0UL)
{
/*
This one is supposed to be set: just an extra check so that
nothing strange has happened.
*/
DBUG_ASSERT(get_flags(STMT_END_F));
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
close_thread_tables(thd);
thd->clear_error();
DBUG_RETURN(0);
}
/*
'thd' has been set by exec_relay_log_event(), just before calling
do_apply_event(). We still check here to prevent future coding
errors.
*/
DBUG_ASSERT(rli->sql_thd == thd);
/*
If there are no locks taken, this is the first binrow event seen
after the table map events. We should then lock all the tables
used in the transaction and proceed with execution of the actual
event.
*/
if (!thd->lock)
{
bool need_reopen= 1; /* To execute the first lap of the loop below */
/*
lock_tables() reads the contents of thd->lex, so they must be
initialized. Contrary to
Table_map_log_event::do_apply_event(), we don't call
mysql_init_query() as that may reset the binlog format.
*/
lex_start(thd);
while ((error= lock_tables(thd, rli->tables_to_lock,
rli->tables_to_lock_count, &need_reopen)))
{
if (!need_reopen)
{
if (thd->is_slave_error || thd->is_fatal_error)
{
/*
Error reporting borrowed from Query_log_event with many excessive
simplifications (we don't honour --slave-skip-errors)
*/
uint actual_error= thd->net.last_errno;
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' in %s event: when locking tables",
(actual_error ? thd->net.last_error :
"unexpected success or fatal error"),
get_type_str());
thd->is_fatal_error= 1;
}
else
{
rli->report(ERROR_LEVEL, error,
"Error in %s event: when locking tables",
get_type_str());
}
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
DBUG_RETURN(error);
}
/*
So we need to reopen the tables.
We need to flush the pending RBR event, since it keeps a
pointer to an open table.
ALTERNATIVE SOLUTION (not implemented): Extract a pointer to
the pending RBR event and reset the table pointer after the
tables have been reopened.
NOTE: For this new scheme there should be no pending event:
need to add code to assert that is the case.
*/
thd->binlog_flush_pending_rows_event(false);
TABLE_LIST *tables= rli->tables_to_lock;
close_tables_for_reopen(thd, &tables);
uint tables_count= rli->tables_to_lock_count;
if ((error= open_tables(thd, &tables, &tables_count, 0)))
{
if (thd->is_slave_error || thd->is_fatal_error)
{
/*
Error reporting borrowed from Query_log_event with many excessive
simplifications (we don't honour --slave-skip-errors)
*/
uint actual_error= thd->net.last_errno;
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' on reopening tables",
(actual_error ? thd->net.last_error :
"unexpected success or fatal error"));
thd->is_slave_error= 1;
}
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
DBUG_RETURN(error);
}
}
/*
When the open and locking succeeded, we check all tables to
ensure that they still have the correct type.
We can use a down cast here since we know that every table added
to the tables_to_lock is a RPL_TABLE_LIST.
*/
{
RPL_TABLE_LIST *ptr= rli->tables_to_lock;
for ( ; ptr ; ptr= static_cast<RPL_TABLE_LIST*>(ptr->next_global))
{
if (ptr->m_tabledef.compatible_with(rli, ptr->table))
{
mysql_unlock_tables(thd, thd->lock);
thd->lock= 0;
thd->is_slave_error= 1;
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
DBUG_RETURN(ERR_BAD_TABLE_DEF);
}
}
}
/*
... and then we add all the tables to the table map and remove
them from tables to lock.
We also invalidate the query cache for all the tables, since
they will now be changed.
TODO [/Matz]: Maybe the query cache should not be invalidated
here? It might be that a table is not changed, even though it
was locked for the statement. We do know that each
Old_rows_log_event contains at least one row, so after processing one
Old_rows_log_event, we can invalidate the query cache for the
associated table.
*/
for (TABLE_LIST *ptr= rli->tables_to_lock ; ptr ; ptr= ptr->next_global)
{
const_cast<Relay_log_info*>(rli)->m_table_map.set_table(ptr->table_id, ptr->table);
}
#ifdef HAVE_QUERY_CACHE
query_cache.invalidate_locked_for_write(rli->tables_to_lock);
#endif
}
TABLE*
table=
m_table= const_cast<Relay_log_info*>(rli)->m_table_map.get_table(m_table_id);
if (table)
{
/*
table == NULL means that this table should not be replicated
(this was set up by Table_map_log_event::do_apply_event()
which tested replicate-* rules).
*/
/*
It's not necessary to call set_time(), but
1) it continues the property that "Time" in SHOW PROCESSLIST shows how
much the slave is behind
2) it will be needed when we allow replication from a table with no
TIMESTAMP column to a table with one.
So we call set_time(), like in SBR. Presently it changes nothing.
*/
thd->set_time((time_t)when);
/*
There are a few flags that are replicated with each row event.
Make sure to set/clear them before executing the main body of
the event.
*/
if (get_flags(NO_FOREIGN_KEY_CHECKS_F))
thd->options|= OPTION_NO_FOREIGN_KEY_CHECKS;
else
thd->options&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
if (get_flags(RELAXED_UNIQUE_CHECKS_F))
thd->options|= OPTION_RELAXED_UNIQUE_CHECKS;
else
thd->options&= ~OPTION_RELAXED_UNIQUE_CHECKS;
/* A small test to verify that objects have consistent types */
DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
/*
Now we are in a statement and will stay in a statement until we
see a STMT_END_F.
We set this flag here, before actually applying any rows, in
case the SQL thread is stopped and we need to detect that we're
inside a statement and halting abruptly might cause problems
when restarting.
*/
const_cast<Relay_log_info*>(rli)->set_flag(Relay_log_info::IN_STMT);
if ( m_width == table->s->fields && bitmap_is_set_all(&m_cols))
set_flags(COMPLETE_ROWS_F);
/*
Set tables write and read sets.
Read_set contains all slave columns (in case we are going to fetch
a complete record from slave)
Write_set equals the m_cols bitmap sent from master but it can be
longer if slave has extra columns.
*/
DBUG_PRINT_BITSET("debug", "Setting table's write_set from: %s", &m_cols);
bitmap_set_all(table->read_set);
bitmap_set_all(table->write_set);
if (!get_flags(COMPLETE_ROWS_F))
bitmap_intersect(table->write_set,&m_cols);
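/*
Example: for a three-column table where the master sent only columns 0
and 2, m_cols has bits 0 and 2 set, so after the intersection the
write_set marks just those two columns as being written by this event.
*/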
// Do event specific preparations
error= do_before_row_operations(rli);
// row processing loop
while (error == 0 && m_curr_row < m_rows_end)
{
/* in_use may have been set to NULL in close_tables_for_reopen */
THD* old_thd= table->in_use;
if (!table->in_use)
table->in_use= thd;
error= do_exec_row(rli);
table->in_use = old_thd;
switch (error)
{
case 0:
break;
/* Some recoverable errors */
case HA_ERR_RECORD_CHANGED:
case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if
tuple does not exist */
error= 0;
break;
default:
rli->report(ERROR_LEVEL, thd->net.last_errno,
"Error in %s event: row application failed. %s",
get_type_str(),
thd->net.last_error ? thd->net.last_error : "");
thd->is_slave_error= 1;
break;
}
/*
If m_curr_row_end was not set during event execution (e.g., because
of errors) we can't proceed to the next row. If the error is transient
(i.e., error==0 at this point) we must call unpack_current_row() to set
m_curr_row_end.
*/
DBUG_PRINT("info", ("error: %d", error));
DBUG_PRINT("info", ("curr_row: 0x%lu; curr_row_end: 0x%lu; rows_end: 0x%lu",
(ulong) m_curr_row, (ulong) m_curr_row_end, (ulong) m_rows_end));
if (!m_curr_row_end && !error)
unpack_current_row(rli);
// at this moment m_curr_row_end should be set
DBUG_ASSERT(error || m_curr_row_end != NULL);
DBUG_ASSERT(error || m_curr_row < m_curr_row_end);
DBUG_ASSERT(error || m_curr_row_end <= m_rows_end);
m_curr_row= m_curr_row_end;
} // row processing loop
DBUG_EXECUTE_IF("STOP_SLAVE_after_first_Rows_event",
const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
error= do_after_row_operations(rli, error);
if (!cache_stmt)
{
DBUG_PRINT("info", ("Marked that we need to keep log"));
thd->options|= OPTION_KEEP_LOG;
}
} // if (table)
/*
We need to delay this clear until here because unpack_current_row() uses
master-side table definitions stored in rli.
*/
if (rli->tables_to_lock && get_flags(STMT_END_F))
const_cast<Relay_log_info*>(rli)->clear_tables_to_lock();
if (error)
{ /* an error has occurred during the transaction */
rli->report(ERROR_LEVEL, thd->net.last_errno,
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
get_type_str(), table->s->db.str,
table->s->table_name.str,
thd->net.last_error ? thd->net.last_error : "");
/*
If one day we honour --skip-slave-errors in row-based replication, and
the error should be skipped, then we would clear mappings, rollback,
close tables, but the slave SQL thread would not stop and then may
assume the mapping is still available, the tables are still open...
So then we should clear mappings/rollback/close here only if this is a
STMT_END_F.
For now we code knowing that the error is not skippable, so the slave SQL
thread is certainly going to stop; the rollback happens at the caller,
as it does for SBR.
*/
thd->reset_current_stmt_binlog_row_based();
const_cast<Relay_log_info*>(rli)->cleanup_context(thd, error);
thd->is_slave_error= 1;
DBUG_RETURN(error);
}
/*
This code would ideally be placed in do_update_pos() instead, but
since we have no access to table there, we do the setting of
last_event_start_time here instead.
*/
if (table && (table->s->primary_key == MAX_KEY) &&
!cache_stmt && get_flags(STMT_END_F) == RLE_NO_FLAGS)
{
/*
------------ Temporary fix until WL#2975 is implemented ---------
This event is not the last one (no STMT_END_F). If we stop now
(in case of terminate_slave_thread()), how will we restart? We
have to restart from Table_map_log_event, but as this table is
not transactional, the rows already inserted will still be
present, and idempotency is not guaranteed (no PK) so we risk
that repeating leads to double insert. So we desperately try to
continue, hope we'll eventually leave this buggy situation (by
executing the final Old_rows_log_event). If we are in a hopeless
wait (reached end of last relay log and nothing gets appended
there), we timeout after one minute, and notify DBA about the
problem. When WL#2975 is implemented, just remove the member
Relay_log_info::last_event_start_time and all its occurrences.
*/
const_cast<Relay_log_info*>(rli)->last_event_start_time= my_time(0);
}
DBUG_RETURN(0);
}
Log_event::enum_skip_reason
Old_rows_log_event::do_shall_skip(Relay_log_info *rli)
{
/*
If the slave skip counter is 1 and this event does not end a
statement, then we should not start executing on the next event.
Otherwise, we defer the decision to the normal skipping logic.
*/
if (rli->slave_skip_counter == 1 && !get_flags(STMT_END_F))
return Log_event::EVENT_SKIP_IGNORE;
else
return Log_event::do_shall_skip(rli);
}
int
Old_rows_log_event::do_update_pos(Relay_log_info *rli)
{
DBUG_ENTER("Old_rows_log_event::do_update_pos");
int error= 0;
DBUG_PRINT("info", ("flags: %s",
get_flags(STMT_END_F) ? "STMT_END_F " : ""));
if (get_flags(STMT_END_F))
{
/*
This is the end of a statement or transaction, so close (and
unlock) the tables we opened when processing the
Table_map_log_event starting the statement.
OBSERVER. This will clear *all* mappings, not only those that
are open for the table. There is no good handle for on-close
actions for tables.
NOTE. Even if we have no table ('table' == 0) we still need to be
here, so that we increase the group relay log position. If we didn't, we
could have a group relay log position which lags behind "forever"
(assume the last master's transaction is ignored by the slave because of
replicate-ignore rules).
*/
thd->binlog_flush_pending_rows_event(true);
/*
If this event is not in a transaction, the call below will, if some
transactional storage engines are involved, commit the statement into
them and flush the pending event to binlog.
If this event is in a transaction, the call will do nothing, but a
Xid_log_event will come next which will, if some transactional engines
are involved, commit the transaction and flush the pending event to the
binlog.
*/
error= ha_autocommit_or_rollback(thd, 0);
/*
Now what if this is not a transactional engine? we still need to
flush the pending event to the binlog; we did it with
thd->binlog_flush_pending_rows_event(). Note that we imitate
what is done for real queries: a call to
ha_autocommit_or_rollback() (sometimes only if it involves a
transactional engine), and a call to be sure to have the pending
event flushed.
*/
thd->reset_current_stmt_binlog_row_based();
rli->cleanup_context(thd, 0);
if (error == 0)
{
/*
Indicate that a statement is finished.
Step the group log position if we are not in a transaction,
otherwise increase the event log position.
*/
rli->stmt_done(log_pos, when);
/*
Clear any errors pushed in thd->net.last_err* if for example "no key
found" (as this is allowed). This is a safety measure; apparently
those errors (e.g. when executing a Delete_rows_log_event_old of a
non-existing row, like in rpl_row_mystery22.test,
thd->net.last_error = "Can't find record in 't1'" and last_errno=1032)
do not become visible. We still prefer to wipe them out.
*/
thd->clear_error();
}
else
rli->report(ERROR_LEVEL, error,
"Error in %s event: commit of row events failed, "
"table `%s`.`%s`",
get_type_str(), m_table->s->db.str,
m_table->s->table_name.str);
}
else
{
rli->inc_event_relay_log_pos();
}
DBUG_RETURN(error);
}
#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
#ifndef MYSQL_CLIENT
bool Old_rows_log_event::write_data_header(IO_CACHE *file)
{
uchar buf[ROWS_HEADER_LEN]; // No need to init the buffer
// This method should not be reached.
assert(0);
DBUG_ASSERT(m_table_id != ~0UL);
DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
{
int4store(buf + 0, m_table_id);
int2store(buf + 4, m_flags);
return (my_b_safe_write(file, buf, 6));
});
int6store(buf + RW_MAPID_OFFSET, (ulonglong)m_table_id);
int2store(buf + RW_FLAGS_OFFSET, m_flags);
return (my_b_safe_write(file, buf, ROWS_HEADER_LEN));
}
bool Old_rows_log_event::write_data_body(IO_CACHE*file)
{
/*
Note that this should be the number of *bits*, not the number of
bytes.
*/
uchar sbuf[sizeof(m_width)];
my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf;
// This method should not be reached.
assert(0);
bool res= false;
uchar *const sbuf_end= net_store_length(sbuf, (size_t) m_width);
DBUG_ASSERT(static_cast<size_t>(sbuf_end - sbuf) <= sizeof(sbuf));
DBUG_DUMP("m_width", sbuf, (size_t) (sbuf_end - sbuf));
res= res || my_b_safe_write(file, sbuf, (size_t) (sbuf_end - sbuf));
DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols));
res= res || my_b_safe_write(file, (uchar*) m_cols.bitmap,
no_bytes_in_map(&m_cols));
/*
TODO[refactor write]: Remove the "down cast" here (and elsewhere).
*/
if (get_type_code() == PRE_GA_UPDATE_ROWS_EVENT)
{
DBUG_DUMP("m_cols_ai", (uchar*) m_cols_ai.bitmap,
no_bytes_in_map(&m_cols_ai));
res= res || my_b_safe_write(file, (uchar*) m_cols_ai.bitmap,
no_bytes_in_map(&m_cols_ai));
}
DBUG_DUMP("rows", m_rows_buf, data_size);
res= res || my_b_safe_write(file, m_rows_buf, (size_t) data_size);
return res;
}
#endif
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
void Old_rows_log_event::pack_info(Protocol *protocol)
{
char buf[256];
char const *const flagstr=
get_flags(STMT_END_F) ? " flags: STMT_END_F" : "";
size_t bytes= my_snprintf(buf, sizeof(buf),
"table_id: %lu%s", m_table_id, flagstr);
protocol->store(buf, bytes, &my_charset_bin);
}
#endif
#ifdef MYSQL_CLIENT
void Old_rows_log_event::print_helper(FILE *file,
PRINT_EVENT_INFO *print_event_info,
char const *const name)
{
IO_CACHE *const head= &print_event_info->head_cache;
IO_CACHE *const body= &print_event_info->body_cache;
if (!print_event_info->short_form)
{
bool const last_stmt_event= get_flags(STMT_END_F);
print_header(head, print_event_info, !last_stmt_event);
my_b_printf(head, "\t%s: table id %lu%s\n",
name, m_table_id,
last_stmt_event ? " flags: STMT_END_F" : "");
print_base64(body, print_event_info, !last_stmt_event);
}
if (get_flags(STMT_END_F))
{
copy_event_cache_to_file_and_reinit(head, file);
copy_event_cache_to_file_and_reinit(body, file);
}
}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
/**
Write the current row into event's table.
The row is located in the row buffer, pointed by @c m_curr_row member.
Number of columns of the row is stored in @c m_width member (it can be
different from the number of columns in the table to which we insert).
Bitmap @c m_cols indicates which columns are present in the row. It is assumed
that event's table is already open and pointed by @c m_table.
If the same record already exists in the table it can be either overwritten
or an error is reported depending on the value of @c overwrite flag
(error reporting not yet implemented). Note that the matching record can be
different from the row we insert if we use primary keys to identify records in
the table.
The row to be inserted can contain values only for selected columns. The
missing columns are filled with default values using @c prepare_record()
function. If a matching record is found in the table and @c overwrite is
true, the missing columns are taken from it.
@param rli Relay log info (needed for row unpacking).
@param overwrite
Shall we overwrite if the row already exists or signal
error (currently ignored).
@returns Error code on failure, 0 on success.
This method, if successful, sets @c m_curr_row_end pointer to point at the
next row in the rows buffer. This is done when unpacking the row to be
inserted.
@note If a matching record is found, it is either updated using
@c ha_update_row() or first deleted and then new record written.
*/
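/*
Rough flow of the loop below: try ha_write_row(); on a duplicate-key
error locate the conflicting row (via rnd_pos() on engines with
HA_DUPLICATE_POS, otherwise via index_read_idx_map() on the duplicated
key); then either update that row in place (when the key is the last
unique key and no foreign keys reference the table) or delete it and
retry the write.
*/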
int
Old_rows_log_event::write_row(const Relay_log_info *const rli,
const bool overwrite)
{
DBUG_ENTER("write_row");
DBUG_ASSERT(m_table != NULL && thd != NULL);
TABLE *table= m_table; // pointer to event's table
int error;
int keynum;
auto_afree_ptr<char> key(NULL);
/* fill table->record[0] with default values */
if ((error= prepare_record(rli, table, m_width,
TRUE /* check if columns have def. values */)))
DBUG_RETURN(error);
/* unpack row into table->record[0] */
error= unpack_current_row(rli); // TODO: how to handle errors?
#ifndef DBUG_OFF
DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set);
DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set);
#endif
/*
Try to write record. If a corresponding record already exists in the table,
we try to change it using ha_update_row() if possible. Otherwise we delete
it and repeat the whole process again.
TODO: Add safety measures against infinite looping.
*/
while ((error= table->file->ha_write_row(table->record[0])))
{
if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
{
table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
DBUG_RETURN(error);
}
if ((keynum= table->file->get_dup_key(error)) < 0)
{
DBUG_PRINT("info",("Can't locate duplicate key (get_dup_key returns %d)",keynum));
table->file->print_error(error, MYF(0));
/*
We failed to retrieve the duplicate key
- either because the error was not "duplicate key" error
- or because the information which key is not available
*/
DBUG_RETURN(error);
}
/*
We need to retrieve the old row into record[1] to be able to
either update or delete the offending record. We either:
- use rnd_pos() with a row-id (available as dupp_row) to the
offending row, if that is possible (MyISAM and Blackhole), or else
- use index_read_idx() with the key that is duplicated, to
retrieve the offending row.
*/
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
DBUG_PRINT("info",("Locating offending record using rnd_pos()"));
error= table->file->rnd_pos(table->record[1], table->file->dup_ref);
if (error)
{
DBUG_PRINT("info",("rnd_pos() returns error %d",error));
table->file->print_error(error, MYF(0));
DBUG_RETURN(error);
}
}
else
{
DBUG_PRINT("info",("Locating offending record using index_read_idx()"));
if (table->file->extra(HA_EXTRA_FLUSH_CACHE))
{
DBUG_PRINT("info",("Error when setting HA_EXTRA_FLUSH_CACHE"));
DBUG_RETURN(my_errno);
}
if (key.get() == NULL)
{
key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
if (key.get() == NULL)
{
DBUG_PRINT("info",("Can't allocate key buffer"));
DBUG_RETURN(ENOMEM);
}
}
key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
0);
error= table->file->index_read_idx_map(table->record[1], keynum,
(const uchar*)key.get(),
HA_WHOLE_KEY,
HA_READ_KEY_EXACT);
if (error)
{
DBUG_PRINT("info",("index_read_idx() returns error %d",error));
table->file->print_error(error, MYF(0));
DBUG_RETURN(error);
}
}
/*
Now, record[1] should contain the offending row. That
will enable us to update it or, alternatively, delete it (so
that we can insert the new row afterwards).
*/
/*
If row is incomplete we will use the record found to fill
missing columns.
*/
if (!get_flags(COMPLETE_ROWS_F))
{
restore_record(table,record[1]);
error= unpack_current_row(rli);
}
#ifndef DBUG_OFF
DBUG_PRINT("debug",("preparing for update: before and after image"));
DBUG_DUMP("record[1] (before)", table->record[1], table->s->reclength);
DBUG_DUMP("record[0] (after)", table->record[0], table->s->reclength);
#endif
/*
REPLACE is defined as either INSERT or DELETE + INSERT. If
possible, we can replace it with an UPDATE, but that will not
work on InnoDB if FOREIGN KEY checks are necessary.
I (Matz) am not sure of the reason for the last_uniq_key()
check, but I'm guessing that it's something along the
following lines.
Suppose that we got the duplicate key to be a key that is not
the last unique key for the table and we perform an update:
then there might be another key for which the unique check will
fail, so we're better off just deleting the row and inserting
the correct row.
*/
if (last_uniq_key(table, keynum) &&
!table->file->referenced_by_foreign_key())
{
DBUG_PRINT("info",("Updating row using ha_update_row()"));
error=table->file->ha_update_row(table->record[1],
table->record[0]);
switch (error) {
case HA_ERR_RECORD_IS_THE_SAME:
DBUG_PRINT("info",("ignoring HA_ERR_RECORD_IS_THE_SAME error from"
" ha_update_row()"));
error= 0;
/* fall through */
case 0:
break;
default:
DBUG_PRINT("info",("ha_update_row() returns error %d",error));
table->file->print_error(error, MYF(0));
}
DBUG_RETURN(error);
}
else
{
DBUG_PRINT("info",("Deleting offending row and trying to write new one again"));
if ((error= table->file->ha_delete_row(table->record[1])))
{
DBUG_PRINT("info",("ha_delete_row() returns error %d",error));
table->file->print_error(error, MYF(0));
DBUG_RETURN(error);
}
/* Will retry ha_write_row() with the offending row removed. */
}
}
DBUG_RETURN(error);
}
/**
Locate the current row in event's table.
The current row is pointed by @c m_curr_row. Member @c m_width tells how many
columns there are in the row (this can be different from the number of columns
in the table). It is assumed that event's table is already open and pointed
by @c m_table.
If a corresponding record is found in the table it is stored in
@c m_table->record[0]. Note that when record is located based on a primary
key, it is possible that the record found differs from the row being located.
If no key is specified or table does not have keys, a table scan is used to
find the row. In that case the row should be complete and contain values for
all columns. However, it can still be shorter than the table, i.e. the table
can contain extra columns not present in the row. It is also possible that
the table has fewer columns than the row being located.
@returns Error code on failure, 0 on success.
@post In case of success @c m_table->record[0] contains the record found.
Also, the internal "cursor" of the table is positioned at the record found.
@note If the engine allows random access of the records, a combination of
@c position() and @c rnd_pos() will be used.
*/
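/*
Lookup strategy used below, in order of preference: (1) rnd_pos_by_record()
when the engine requires a position for the primary key, (2) an index scan
on key 0 using m_key, comparing candidate rows with record_compare() when
the key is not unique, (3) a full table scan with rnd_next(), restarted at
most once.
*/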
int Old_rows_log_event::find_row(const Relay_log_info *rli)
{
DBUG_ENTER("find_row");
DBUG_ASSERT(m_table && m_table->in_use != NULL);
TABLE *table= m_table;
int error;
/* unpack row - missing fields get default values */
// TODO: shall we check and report errors here?
prepare_record(NULL,table,m_width,FALSE /* don't check errors */);
error= unpack_current_row(rli);
#ifndef DBUG_OFF
DBUG_PRINT("info",("looking for the following record"));
DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
#endif
if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
table->s->primary_key < MAX_KEY)
{
/*
Use a more efficient method to fetch the record given by
table->record[0] if the engine allows it. We first compute a
row reference using the position() member function (it will be
stored in table->file->ref) and then use rnd_pos() to position
the "cursor" (i.e., record[0] in this case) at the correct row.
TODO: Add a check that the correct record has been fetched by
comparing with the original record. Take into account that the
record on the master and slave can be of different
length. Something along these lines should work:
ADD>>> store_record(table,record[1]);
int error= table->file->rnd_pos(table->record[0], table->file->ref);
ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
table->s->reclength) == 0);
*/
DBUG_PRINT("info",("locating record using primary key (position)"));
int error= table->file->rnd_pos_by_record(table->record[0]);
if (error)
{
DBUG_PRINT("info",("rnd_pos returns error %d",error));
table->file->print_error(error, MYF(0));
}
DBUG_RETURN(error);
}
// We can't use position() - try other methods.
/*
We need to retrieve all fields
TODO: Move this out from this function to main loop
*/
table->use_all_columns();
/*
Save copy of the record in table->record[1]. It might be needed
later if linear search is used to find exact match.
*/
store_record(table,record[1]);
if (table->s->keys > 0)
{
DBUG_PRINT("info",("locating record using primary key (index_read)"));
/* We have a key: search the table using the index */
if (!table->file->inited && (error= table->file->ha_index_init(0, FALSE)))
{
DBUG_PRINT("info",("ha_index_init returns error %d",error));
table->file->print_error(error, MYF(0));
DBUG_RETURN(error);
}
/* Fill key data for the row */
DBUG_ASSERT(m_key);
key_copy(m_key, table->record[0], table->key_info, 0);
/*
Don't print debug messages when running valgrind since they can
trigger false warnings.
*/
#ifndef HAVE_purify
DBUG_DUMP("key data", m_key, table->key_info->key_length);
#endif
/*
We need to set the null bytes to ensure that the filler bits are
all set when returning. There are storage engines that just set
the necessary bits on the bytes and don't set the filler bits
correctly.
*/
my_ptrdiff_t const pos=
table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
table->record[0][pos]= 0xFF;
if ((error= table->file->index_read_map(table->record[0], m_key,
HA_WHOLE_KEY,
HA_READ_KEY_EXACT)))
{
DBUG_PRINT("info",("no record matching the key found in the table"));
table->file->print_error(error, MYF(0));
table->file->ha_index_end();
DBUG_RETURN(error);
}
/*
Don't print debug messages when running valgrind since they can
trigger false warnings.
*/
#ifndef HAVE_purify
DBUG_PRINT("info",("found first matching record"));
DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
#endif
/*
Below is a minor "optimization". If the key (i.e., key number
0) has the HA_NOSAME flag set, we know that we have found the
correct record (since there can be no duplicates); otherwise, we
have to compare the record with the one found to see if it is
the correct one.
CAVEAT! This behaviour is essential for the replication of,
e.g., the mysql.proc table since the correct record *shall* be
found using the primary key *only*. There shall be no
comparison of non-PK columns to decide if the correct record is
found. I can see no scenario where it would be incorrect to
choose the row to change using only a PK or a UNIQUE index.
*/
if (table->key_info->flags & HA_NOSAME)
{
table->file->ha_index_end();
DBUG_RETURN(0);
}
/*
In case key is not unique, we still have to iterate over records found
and find the one which is identical to the row given. A copy of the
record we are looking for is stored in record[1].
*/
DBUG_PRINT("info",("non-unique index, scanning it to find matching record"));
while (record_compare(table))
{
/*
We need to set the null bytes to ensure that the filler bits
are all set when returning. There are storage engines that
just set the necessary bits on the bytes and don't set the
filler bits correctly.
TODO[record format ndb]: Remove this code once NDB returns the
correct record format.
*/
if (table->s->null_bytes > 0)
{
table->record[0][table->s->null_bytes - 1]|=
256U - (1U << table->s->last_null_bit_pos);
}
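/*
Example: with last_null_bit_pos == 3 the expression above is
256 - (1 << 3) == 248 == 0xF8, i.e. it sets the unused filler bits
3..7 of the last null byte to 1.
*/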
if ((error= table->file->index_next(table->record[0])))
{
DBUG_PRINT("info",("no record matching the given row found"));
table->file->print_error(error, MYF(0));
table->file->ha_index_end();
DBUG_RETURN(error);
}
}
/*
Have to restart the scan to be able to fetch the next row.
*/
table->file->ha_index_end();
}
else
{
DBUG_PRINT("info",("locating record using table scan (rnd_next)"));
int restart_count= 0; // Number of times scanning has restarted from top
/* We don't have a key: search the table using rnd_next() */
if ((error= table->file->ha_rnd_init(1)))
{
DBUG_PRINT("info",("error initializing table scan"
" (ha_rnd_init returns %d)",error));
table->file->print_error(error, MYF(0));
DBUG_RETURN(error);
}
/* Continue until we find the right record or have made a full loop */
do
{
error= table->file->rnd_next(table->record[0]);
switch (error) {
case 0:
case HA_ERR_RECORD_DELETED:
break;
case HA_ERR_END_OF_FILE:
if (++restart_count < 2)
table->file->ha_rnd_init(1);
break;
default:
DBUG_PRINT("info", ("Failed to get next record"
" (rnd_next returns %d)",error));
table->file->print_error(error, MYF(0));
table->file->ha_rnd_end();
DBUG_RETURN(error);
}
}
while (restart_count < 2 && record_compare(table));
/*
Note: the record_compare above will take into account all record fields,
which might be incorrect in case a partial row was given in the event
*/
/*
Have to restart the scan to be able to fetch the next row.
*/
if (restart_count == 2)
DBUG_PRINT("info", ("Record not found"));
else
DBUG_DUMP("record found", table->record[0], table->s->reclength);
table->file->ha_rnd_end();
DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
DBUG_RETURN(error);
}
DBUG_RETURN(0);
}
#endif
/**************************************************************************
Write_rows_log_event member functions
**************************************************************************/
/*
Constructor used to build an event for writing to the binary log.
*/
#if !defined(MYSQL_CLIENT)
Write_rows_log_event_old::Write_rows_log_event_old(THD *thd_arg,
TABLE *tbl_arg,
ulong tid_arg,
MY_BITMAP const *cols,
bool is_transactional)
: Old_rows_log_event(thd_arg, tbl_arg, tid_arg, cols, is_transactional)
{
// This constructor should not be reached.
assert(0);
}
#endif
/*
Constructor used by slave to read the event from the binary log.
*/
#ifdef HAVE_REPLICATION
Write_rows_log_event_old::Write_rows_log_event_old(const char *buf,
uint event_len,
const Format_description_log_event
*description_event)
: Old_rows_log_event(buf, event_len, PRE_GA_WRITE_ROWS_EVENT,
description_event)
{
}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
int
Write_rows_log_event_old::do_before_row_operations(const Slave_reporting_capability *const)
{
int error= 0;
/*
We are using REPLACE semantics and not INSERT IGNORE semantics
when writing rows, that is: new rows replace old rows. We need to
inform the storage engine that it should use this behaviour.
*/
/* Tell the storage engine that we are using REPLACE semantics. */
thd->lex->duplicates= DUP_REPLACE;
/*
Pretend we're executing a REPLACE command: this is needed for
InnoDB and NDB Cluster since they are not (properly) checking the
lex->duplicates flag.
*/
thd->lex->sql_command= SQLCOM_REPLACE;
/*
Do not raise the error flag in case of hitting a duplicate key on a unique attribute
*/
m_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
/*
NDB specific: update from ndb master wrapped as Write_rows
*/
/*
so that the event should be applied to replace slave's row
*/
m_table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
/*
NDB specific: if update from ndb master wrapped as Write_rows
does not find the row it's assumed idempotent binlog applying
is taking place; don't raise the error.
*/
m_table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
/*
TODO: the cluster team (Tomas?) says that it's better if the engine knows
how many rows are going to be inserted, then it can allocate needed memory
from the start.
*/
m_table->file->ha_start_bulk_insert(0);
/*
We need TIMESTAMP_NO_AUTO_SET, otherwise ha_write_row() will not fill
any TIMESTAMP column with data from the row but instead will use
the event's current time.
As we replicate from TIMESTAMP to TIMESTAMP and slave has no extra
columns, we know that all TIMESTAMP columns on slave will receive explicit
data from the row, so TIMESTAMP_NO_AUTO_SET is ok.
When we allow a table without TIMESTAMP to be replicated to a table having
more columns including a TIMESTAMP column, or when we allow a TIMESTAMP
column to be replicated into a BIGINT column and the slave's table has a
TIMESTAMP column, then the slave's TIMESTAMP column will take its value
from set_time() which we called earlier (consistent with SBR). And then in
some cases we won't want TIMESTAMP_NO_AUTO_SET (will require some code to
analyze if explicit data is provided for slave's TIMESTAMP columns).
*/
m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
return error;
}
int
Write_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const,
int error)
{
int local_error= 0;
m_table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
m_table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
/*
resetting the extra with
table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY);
fires bug#27077
todo: explain or fix
*/
if ((local_error= m_table->file->ha_end_bulk_insert()))
{
m_table->file->print_error(local_error, MYF(0));
}
return error? error : local_error;
}
int
Write_rows_log_event_old::do_exec_row(const Relay_log_info *const rli)
{
DBUG_ASSERT(m_table != NULL);
int error= write_row(rli, TRUE /* overwrite */);
if (error && !thd->net.last_errno)
thd->net.last_errno= error;
return error;
}
#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
#ifdef MYSQL_CLIENT
void Write_rows_log_event_old::print(FILE *file,
PRINT_EVENT_INFO* print_event_info)
{
Old_rows_log_event::print_helper(file, print_event_info, "Write_rows_old");
}
#endif
/**************************************************************************
Delete_rows_log_event member functions
**************************************************************************/
/*
Constructor used to build an event for writing to the binary log.
*/
#ifndef MYSQL_CLIENT
Delete_rows_log_event_old::Delete_rows_log_event_old(THD *thd_arg,
TABLE *tbl_arg,
ulong tid,
MY_BITMAP const *cols,
bool is_transactional)
: Old_rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional),
m_after_image(NULL), m_memory(NULL)
{
// This constructor should not be reached.
assert(0);
}
#endif /* #if !defined(MYSQL_CLIENT) */
/*
Constructor used by slave to read the event from the binary log.
*/
#ifdef HAVE_REPLICATION
Delete_rows_log_event_old::Delete_rows_log_event_old(const char *buf,
uint event_len,
const Format_description_log_event
*description_event)
: Old_rows_log_event(buf, event_len, PRE_GA_DELETE_ROWS_EVENT,
description_event),
m_after_image(NULL), m_memory(NULL)
{
}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
int
Delete_rows_log_event_old::do_before_row_operations(const Slave_reporting_capability *const)
{
if ((m_table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
m_table->s->primary_key < MAX_KEY)
{
/*
We don't need to allocate any memory for m_key since it is not used.
*/
return 0;
}
if (m_table->s->keys > 0)
{
// Allocate buffer for key searches
m_key= (uchar*)my_malloc(m_table->key_info->key_length, MYF(MY_WME));
if (!m_key)
return HA_ERR_OUT_OF_MEM;
}
return 0;
}
int
Delete_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const,
int error)
{
/* error= ToDo: find out what this should really be; this triggers close_scan in ndb, returning error? */
m_table->file->ha_index_or_rnd_end();
my_free(m_key, MYF(MY_ALLOW_ZERO_PTR));
m_key= NULL;
return error;
}
int Delete_rows_log_event_old::do_exec_row(const Relay_log_info *const rli)
{
int error;
DBUG_ASSERT(m_table != NULL);
if (!(error= find_row(rli)))
{
/*
Delete the record found, located in record[0]
*/
error= m_table->file->ha_delete_row(m_table->record[0]);
}
return error;
}
#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
#ifdef MYSQL_CLIENT
void Delete_rows_log_event_old::print(FILE *file,
PRINT_EVENT_INFO* print_event_info)
{
Old_rows_log_event::print_helper(file, print_event_info, "Delete_rows_old");
}
#endif
/**************************************************************************
Update_rows_log_event member functions
**************************************************************************/
/*
Constructor used to build an event for writing to the binary log.
*/
#if !defined(MYSQL_CLIENT)
Update_rows_log_event_old::Update_rows_log_event_old(THD *thd_arg,
TABLE *tbl_arg,
ulong tid,
MY_BITMAP const *cols,
bool is_transactional)
: Old_rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional),
m_after_image(NULL), m_memory(NULL)
{
// This constructor should not be reached.
assert(0);
init(cols);
}
void Update_rows_log_event_old::init(MY_BITMAP const *cols)
{
/* if bitmap_init fails, caught in is_valid() */
if (likely(!bitmap_init(&m_cols_ai,
m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL,
m_width,
false)))
{
/* Cols can be zero if this is a dummy binrows event */
if (likely(cols != NULL))
{
memcpy(m_cols_ai.bitmap, cols->bitmap, no_bytes_in_map(cols));
create_last_word_mask(&m_cols_ai);
}
}
}
#endif /* !defined(MYSQL_CLIENT) */
Update_rows_log_event_old::~Update_rows_log_event_old()
{
if (m_cols_ai.bitmap == m_bitbuf_ai) // no my_malloc happened
m_cols_ai.bitmap= 0; // so no my_free in bitmap_free
bitmap_free(&m_cols_ai); // To pair with bitmap_init().
}
/*
Constructor used by slave to read the event from the binary log.
*/
#ifdef HAVE_REPLICATION
Update_rows_log_event_old::Update_rows_log_event_old(const char *buf,
uint event_len,
const
Format_description_log_event
*description_event)
: Old_rows_log_event(buf, event_len, PRE_GA_UPDATE_ROWS_EVENT,
description_event),
m_after_image(NULL), m_memory(NULL)
{
}
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
int
Update_rows_log_event_old::do_before_row_operations(const Slave_reporting_capability *const)
{
if (m_table->s->keys > 0)
{
// Allocate buffer for key searches
m_key= (uchar*)my_malloc(m_table->key_info->key_length, MYF(MY_WME));
if (!m_key)
return HA_ERR_OUT_OF_MEM;
}
m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
return 0;
}
int
Update_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const,
int error)
{
/* error= ToDo: find out what this should really be; this triggers close_scan in ndb, returning error? */
m_table->file->ha_index_or_rnd_end();
my_free(m_key, MYF(MY_ALLOW_ZERO_PTR)); // Free for multi_malloc
m_key= NULL;
return error;
}
int
Update_rows_log_event_old::do_exec_row(const Relay_log_info *const rli)
{
DBUG_ASSERT(m_table != NULL);
int error= find_row(rli);
if (error)
{
/*
We need to read the second image in the event of error to be
able to skip to the next pair of updates
*/
m_curr_row= m_curr_row_end;
unpack_current_row(rli);
return error;
}
/*
This is the situation after locating BI:
===|=== before image ====|=== after image ===|===
^ ^
m_curr_row m_curr_row_end
BI found in the table is stored in record[0]. We copy it to record[1]
and unpack AI to record[0].
*/
store_record(m_table,record[1]);
m_curr_row= m_curr_row_end;
error= unpack_current_row(rli); // this also updates m_curr_row_end
/*
Now we have the right row to update. The old row (the one we're
looking for) is in record[1] and the new row is in record[0].
*/
#ifndef HAVE_purify
/*
Don't print debug messages when running valgrind since they can
trigger false warnings.
*/
DBUG_PRINT("info",("Updating row in table"));
DBUG_DUMP("old record", m_table->record[1], m_table->s->reclength);
DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength);
#endif
error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]);
if (error == HA_ERR_RECORD_IS_THE_SAME)
error= 0;
return error;
}
#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
#ifdef MYSQL_CLIENT
void Update_rows_log_event_old::print(FILE *file,
PRINT_EVENT_INFO* print_event_info)
{
Old_rows_log_event::print_helper(file, print_event_info, "Update_rows_old");
}
#endif
@@ -20,18 +20,269 @@
Need to include this file at the proper position of log_event.h
*/
/**
@file
@brief This file contains classes handling old formats of row-based
binlog events.
*/
/*
Around 2007-10-31, I made these classes completely separated from
the new classes (before, there was a complex class hierarchy
involving multiple inheritance; see BUG#31581), by simply copying
and pasting the entire contents of Rows_log_event into
Old_rows_log_event and the entire contents of
{Write|Update|Delete}_rows_log_event into
{Write|Update|Delete}_rows_log_event_old. For clarity, I will keep
the comments marking which code was cut-and-pasted for some time.
With the classes collapsed into one, there is probably some
redundancy (maybe some methods can be simplified and/or removed),
but we keep them this way for now. /Sven
*/
/**
@class Old_rows_log_event
Base class for the three types of row-based events
{Write|Update|Delete}_row_log_event_old, with event type codes
PRE_GA_{WRITE|UPDATE|DELETE}_ROWS_EVENT. These events are never
created any more, except when reading a relay log created by an old
server.
*/
class Old_rows_log_event : public Log_event
{
/********** BEGIN CUT & PASTE FROM Rows_log_event **********/
public:
/**
Enumeration of the errors that can be returned.
*/
enum enum_error
{
ERR_OPEN_FAILURE = -1, /**< Failure to open table */
ERR_OK = 0, /**< No error */
ERR_TABLE_LIMIT_EXCEEDED = 1, /**< No more room for tables */
ERR_OUT_OF_MEM = 2, /**< Out of memory */
ERR_BAD_TABLE_DEF = 3, /**< Table definition does not match */
ERR_RBR_TO_SBR = 4 /**< daisy-chaining RBR to SBR not allowed */
};
/*
These definitions allow you to combine the flags into an
appropriate flag set using the normal bitwise operators. The
implicit conversion from an enum-constant to an integer is
accepted by the compiler, which is then used to set the real set
of flags.
*/
enum enum_flag
{
/* Last event of a statement */
STMT_END_F = (1U << 0),
/* Value of the OPTION_NO_FOREIGN_KEY_CHECKS flag in thd->options */
NO_FOREIGN_KEY_CHECKS_F = (1U << 1),
/* Value of the OPTION_RELAXED_UNIQUE_CHECKS flag in thd->options */
RELAXED_UNIQUE_CHECKS_F = (1U << 2),
/**
Indicates that rows in this event are complete, that is contain
values for all columns of the table.
*/
COMPLETE_ROWS_F = (1U << 3)
};
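/*
Since the values are distinct bits, flags combine with ordinary bitwise
operators, e.g. set_flags(STMT_END_F | NO_FOREIGN_KEY_CHECKS_F) marks an
event that both ends its statement and was logged with foreign key
checks disabled.
*/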
typedef uint16 flag_set;
/* Special constants representing sets of flags */
enum
{
RLE_NO_FLAGS = 0U
};
virtual ~Old_rows_log_event();
void set_flags(flag_set flags_arg) { m_flags |= flags_arg; }
void clear_flags(flag_set flags_arg) { m_flags &= ~flags_arg; }
flag_set get_flags(flag_set flags_arg) const { return m_flags & flags_arg; }
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
virtual void pack_info(Protocol *protocol);
#endif
#ifdef MYSQL_CLIENT
/* not for direct call, each derived has its own ::print() */
virtual void print(FILE *file, PRINT_EVENT_INFO *print_event_info)= 0;
#endif
#ifndef MYSQL_CLIENT
int add_row_data(uchar *data, size_t length)
{
return do_add_row_data(data,length);
}
#endif
/* Member functions to implement superclass interface */
virtual int get_data_size();
MY_BITMAP const *get_cols() const { return &m_cols; }
size_t get_width() const { return m_width; }
ulong get_table_id() const { return m_table_id; }
#ifndef MYSQL_CLIENT
virtual bool write_data_header(IO_CACHE *file);
virtual bool write_data_body(IO_CACHE *file);
virtual const char *get_db() { return m_table->s->db.str; }
#endif
/*
Check that malloc() succeeded in allocating memory for the rows
buffer and the COLS vector. Checking that an Update_rows_log_event_old
is valid is done in the Update_rows_log_event_old::is_valid()
function.
*/
virtual bool is_valid() const
{
return m_rows_buf && m_cols.bitmap;
}
uint m_row_count; /* The number of rows added to the event */
protected:
/*
The constructors are protected since you're supposed to inherit
this class, not create instances of this class.
*/
#ifndef MYSQL_CLIENT
Old_rows_log_event(THD*, TABLE*, ulong table_id,
MY_BITMAP const *cols, bool is_transactional);
#endif
Old_rows_log_event(const char *row_data, uint event_len,
Log_event_type event_type,
const Format_description_log_event *description_event);
#ifdef MYSQL_CLIENT
void print_helper(FILE *, PRINT_EVENT_INFO *, char const *const name);
#endif
#ifndef MYSQL_CLIENT
virtual int do_add_row_data(uchar *data, size_t length);
#endif
#ifndef MYSQL_CLIENT
TABLE *m_table; /* The table the rows belong to */
#endif
ulong m_table_id; /* Table ID */
MY_BITMAP m_cols; /* Bitmap denoting columns available */
ulong m_width; /* The width of the columns bitmap */
/*
Bitmap for columns available in the after image, if present. These
fields are only available for Update_rows events. Observe that the
width of both the before image COLS vector and the after image
COLS vector is the same: the number of columns of the table on the
master.
*/
MY_BITMAP m_cols_ai;
ulong m_master_reclength; /* Length of record on master side */
/* Bit buffers in the same memory as the class */
uint32 m_bitbuf[128/(sizeof(uint32)*8)];
uint32 m_bitbuf_ai[128/(sizeof(uint32)*8)];
uchar *m_rows_buf; /* The rows in packed format */
uchar *m_rows_cur; /* One-after the end of the data */
uchar *m_rows_end; /* One-after the end of the allocated space */
flag_set m_flags; /* Flags for row-level events */
/* helper functions */
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
const uchar *m_curr_row; /* Start of the row being processed */
const uchar *m_curr_row_end; /* One-after the end of the current row */
uchar *m_key; /* Buffer to keep key value during searches */
int find_row(const Relay_log_info *const);
int write_row(const Relay_log_info *const, const bool);
// Unpack the current row into m_table->record[0]
int unpack_current_row(const Relay_log_info *const rli)
{
DBUG_ASSERT(m_table);
ASSERT_OR_RETURN_ERROR(m_curr_row < m_rows_end, HA_ERR_CORRUPT_EVENT);
int const result= ::unpack_row(rli, m_table, m_width, m_curr_row, &m_cols,
&m_curr_row_end, &m_master_reclength);
ASSERT_OR_RETURN_ERROR(m_curr_row_end <= m_rows_end, HA_ERR_CORRUPT_EVENT);
return result;
}
#endif
private:
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
virtual int do_apply_event(Relay_log_info const *rli);
virtual int do_update_pos(Relay_log_info *rli);
virtual enum_skip_reason do_shall_skip(Relay_log_info *rli);
/*
Primitive to prepare for a sequence of row executions.
DESCRIPTION
Before doing a sequence of do_prepare_row() and do_exec_row()
calls, this member function should be called to prepare for the
entire sequence. Typically, this member function will allocate
space for any buffers that are needed for the two member
functions mentioned above.
RETURN VALUE
The member function will return 0 if all went OK, or a non-zero
error code otherwise.
*/
virtual
int do_before_row_operations(const Slave_reporting_capability *const log) = 0;
/*
Primitive to clean up after a sequence of row executions.
DESCRIPTION
After doing a sequence of do_prepare_row() and do_exec_row(),
this member function should be called to clean up and release
any allocated buffers.
The error argument, if non-zero, indicates an error which happened during
row processing before this function was called. In this case, even if
the function is successful, it should return the error code given in the argument.
*/
virtual
int do_after_row_operations(const Slave_reporting_capability *const log,
int error) = 0;
/*
Primitive to do the actual execution necessary for a row.
DESCRIPTION
The member function will do the actual execution needed to handle a row.
The row is located at m_curr_row. When the function returns,
m_curr_row_end should point at the next row (one byte after the end
of the current row).
RETURN VALUE
0 if execution succeeded, 1 if execution failed.
*/
virtual int do_exec_row(const Relay_log_info *const rli) = 0;
#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
/********** END OF CUT & PASTE FROM Rows_log_event **********/
protected:
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
int do_apply_event(Old_rows_log_event*,const Relay_log_info*);
/*
Primitive to prepare for a sequence of row executions.
...@@ -100,32 +351,60 @@ class Old_rows_log_event
};
/**
@class Write_rows_log_event_old

Old class for binlog events that write new rows to a table (event
type code PRE_GA_WRITE_ROWS_EVENT). Such events are never produced
by this version of the server, but they may be read from a relay log
created by an old server. New servers create events of class
Write_rows_log_event (event type code WRITE_ROWS_EVENT) instead.
*/
class Write_rows_log_event_old : public Old_rows_log_event
{
/********** BEGIN CUT & PASTE FROM Write_rows_log_event **********/
public:
#if !defined(MYSQL_CLIENT)
Write_rows_log_event_old(THD*, TABLE*, ulong table_id,
MY_BITMAP const *cols, bool is_transactional);
#endif
#ifdef HAVE_REPLICATION
Write_rows_log_event_old(const char *buf, uint event_len,
const Format_description_log_event *description_event);
#endif
#if !defined(MYSQL_CLIENT)
static bool binlog_row_logging_function(THD *thd, TABLE *table,
bool is_transactional,
MY_BITMAP *cols,
uint fields,
const uchar *before_record
__attribute__((unused)),
const uchar *after_record)
{
return thd->binlog_write_row(table, is_transactional,
cols, fields, after_record);
}
#endif
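/*
  Illustrative note (added commentary): the *_rows_log_event_old classes in
  this file expose structurally identical binlog_row_logging_function()
  helpers that differ only in which record images they forward:

    write  -> thd->binlog_write_row(..., after_record)
    update -> thd->binlog_update_row(..., before_record, after_record)
    delete -> thd->binlog_delete_row(..., before_record)

  A caller holding a pointer to one of them can therefore invoke it
  uniformly, along the lines of the hypothetical call below (log_func and
  has_trans are illustrative names only):

    log_func(thd, table, has_trans, cols, table->s->fields,
             before_record, after_record);
*/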
private:
#ifdef MYSQL_CLIENT
void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
virtual int do_before_row_operations(const Slave_reporting_capability *const);
virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
virtual int do_exec_row(const Relay_log_info *const);
#endif
/********** END OF CUT & PASTE FROM Write_rows_log_event **********/
public:
enum
{
/* Support interface to THD::binlog_prepare_pending_rows_event */
TYPE_CODE = PRE_GA_WRITE_ROWS_EVENT
};
private:
virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
...@@ -145,35 +424,74 @@ private:
};
/**
@class Update_rows_log_event_old

Old class for binlog events that modify existing rows in a table
(event type code PRE_GA_UPDATE_ROWS_EVENT). Such events are never
produced by this version of the server, but they may be read from a
relay log created by an old server. New servers create events of
class Update_rows_log_event (event type code UPDATE_ROWS_EVENT)
instead.
*/
class Update_rows_log_event_old : public Old_rows_log_event
{
/********** BEGIN CUT & PASTE FROM Update_rows_log_event **********/
public:
#ifndef MYSQL_CLIENT
Update_rows_log_event_old(THD*, TABLE*, ulong table_id,
MY_BITMAP const *cols,
bool is_transactional);

void init(MY_BITMAP const *cols);
#endif

virtual ~Update_rows_log_event_old();

#ifdef HAVE_REPLICATION
Update_rows_log_event_old(const char *buf, uint event_len,
const Format_description_log_event *description_event);
#endif

#if !defined(MYSQL_CLIENT)
static bool binlog_row_logging_function(THD *thd, TABLE *table,
bool is_transactional,
MY_BITMAP *cols,
uint fields,
const uchar *before_record,
const uchar *after_record)
{
return thd->binlog_update_row(table, is_transactional,
cols, fields, before_record, after_record);
}
#endif

virtual bool is_valid() const
{
return Old_rows_log_event::is_valid() && m_cols_ai.bitmap;
}

protected:
#ifdef MYSQL_CLIENT
void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
virtual int do_before_row_operations(const Slave_reporting_capability *const);
virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
virtual int do_exec_row(const Relay_log_info *const);
#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
/********** END OF CUT & PASTE FROM Update_rows_log_event **********/
uchar *m_after_image, *m_memory;
public:
enum
{
/* Support interface to THD::binlog_prepare_pending_rows_event */
TYPE_CODE = PRE_GA_UPDATE_ROWS_EVENT
};
private:
virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
...@@ -192,9 +510,54 @@ private:
};
/**
@class Delete_rows_log_event_old

Old class for binlog events that delete existing rows from a table
(event type code PRE_GA_DELETE_ROWS_EVENT). Such events are never
produced by this version of the server, but they may be read from a
relay log created by an old server. New servers create events of
class Delete_rows_log_event (event type code DELETE_ROWS_EVENT)
instead.
*/
class Delete_rows_log_event_old : public Old_rows_log_event
{
/********** BEGIN CUT & PASTE FROM Delete_rows_log_event **********/
public:
#ifndef MYSQL_CLIENT
Delete_rows_log_event_old(THD*, TABLE*, ulong,
MY_BITMAP const *cols, bool is_transactional);
#endif
#ifdef HAVE_REPLICATION
Delete_rows_log_event_old(const char *buf, uint event_len,
const Format_description_log_event *description_event);
#endif
#if !defined(MYSQL_CLIENT)
static bool binlog_row_logging_function(THD *thd, TABLE *table,
bool is_transactional,
MY_BITMAP *cols,
uint fields,
const uchar *before_record,
const uchar *after_record
__attribute__((unused)))
{
return thd->binlog_delete_row(table, is_transactional,
cols, fields, before_record);
}
#endif
protected:
#ifdef MYSQL_CLIENT
void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
#endif
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
virtual int do_before_row_operations(const Slave_reporting_capability *const);
virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
virtual int do_exec_row(const Relay_log_info *const);
#endif
/********** END CUT & PASTE FROM Delete_rows_log_event **********/
uchar *m_after_image, *m_memory;

public:
...@@ -204,23 +567,6 @@ public:
TYPE_CODE = PRE_GA_DELETE_ROWS_EVENT
};

private:
virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
...@@ -240,4 +586,3 @@ private:
#endif