Commit 6b15f768 authored by msvensson@neptunus.(none)

Merge neptunus.(none):/home/msvensson/mysql/same_tools/my41-same_tools

into  neptunus.(none):/home/msvensson/mysql/same_tools/my50-same_tools
parents f39ff057 d8191af7
......@@ -802,7 +802,7 @@ void abort_not_supported_test(const char *fmt, ...)
void abort_not_in_this_version()
{
die("Not available in mysqltest for MySQL %s", VERSION);
die("Not available in this version of mysqltest");
}
......
......@@ -5,11 +5,14 @@
# same name.
use File::Basename;
use IO::File();
use strict;
sub collect_test_cases ($);
sub collect_one_test_case ($$$$$$$);
sub mtr_options_from_test_file($$);
##############################################################################
#
# Collect information about test cases we are to run
......@@ -37,6 +40,23 @@ sub collect_test_cases ($) {
opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!");
# ----------------------------------------------------------------------
# Disable some tests listed in disabled.def
# ----------------------------------------------------------------------
my %disabled;
if ( open(DISABLED, "$testdir/disabled.def" ) )
{
while ( <DISABLED> )
{
chomp;
if ( /^\s*(\S+)\s*:\s*(.*?)\s*$/ )
{
$disabled{$1}= $2;
}
}
close DISABLED;
}
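# Editor's sketch (hypothetical test names and reasons, not from this
# commit): a self-contained illustration of the "testname : comment"
# pattern that the loop above applies to each line of disabled.def.
use strict;
use warnings;
my %disabled_example;
my @sample_lines= ("rpl_failsafe : BUG#0000 fails sporadically",
                   "  ndb_restore :   waiting for cluster fix  ",
                   "# just a comment, no colon here");
foreach ( @sample_lines )
{
  if ( /^\s*(\S+)\s*:\s*(.*?)\s*$/ )
  {
    $disabled_example{$1}= $2;
  }
}
# rpl_failsafe and ndb_restore now map to their reasons with surrounding
# whitespace stripped; the colon-less line does not match and is ignored.
print "$_ => '$disabled_example{$_}'\n" for sort keys %disabled_example;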
if ( @::opt_cases )
{
foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort
......@@ -100,30 +120,13 @@ sub collect_test_cases ($) {
}
}
collect_one_test_case($testdir,$resdir,$tname,$elem,$cases,{},
collect_one_test_case($testdir,$resdir,$tname,$elem,$cases,\%disabled,
$component_id);
}
closedir TESTDIR;
}
else
{
# ----------------------------------------------------------------------
# Disable some tests listed in disabled.def
# ----------------------------------------------------------------------
my %disabled;
if ( ! $::opt_ignore_disabled_def and open(DISABLED, "$testdir/disabled.def" ) )
{
while ( <DISABLED> )
{
chomp;
if ( /^\s*([^\s:]+)\s*:\s*(.*?)\s*$/ )
{
$disabled{$1}= $2;
}
}
close DISABLED;
}
foreach my $elem ( sort readdir(TESTDIR) ) {
my $component_id= undef;
my $tname= undef;
......@@ -247,48 +250,54 @@ sub collect_one_test_case($$$$$$$) {
if ( $::opt_skip_rpl )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No replication tests(--skip-rpl)";
return;
}
$tinfo->{'slave_num'}= 1; # Default, use one slave
# FIXME currently we always restart slaves
$tinfo->{'slave_restart'}= 1;
if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' )
{
# $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange
# $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange
}
}
if ( defined mtr_match_prefix($tname,"federated") )
{
$tinfo->{'slave_num'}= 1; # Default, use one slave
# FIXME currently we always restart slaves
$tinfo->{'slave_restart'}= 1;
# Default, federated uses the first slave as its federated database
$tinfo->{'slave_num'}= 1;
}
# Cluster is needed by test case if testname contains ndb
if ( defined mtr_match_substring($tname,"ndb") )
if ( $::opt_with_ndbcluster or defined mtr_match_substring($tname,"ndb") )
{
# This is an ndb test or all tests should be run with ndb cluster started
$tinfo->{'ndb_test'}= 1;
if ( $::opt_skip_ndbcluster )
{
# Skip all ndb tests
# All ndb tests should be skipped
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No ndbcluster test(--skip-ndbcluster)";
return;
}
if ( ! $::opt_with_ndbcluster )
if ( ! $::opt_ndbcluster_supported )
{
# Ndb is not supported, skip them
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No ndbcluster support";
return;
}
}
else
{
# This is not an ndb test
$tinfo->{'ndb_test'}= 0;
if ( $::opt_with_ndbcluster_only )
{
# Only ndb tests should be run, all others should be skipped
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Only ndbcluster tests(--with-ndbcluster-only)";
return;
}
}
# FIXME what about embedded_server + ndbcluster, skip ?!
......@@ -380,6 +389,8 @@ sub collect_one_test_case($$$$$$$) {
if ( $::glob_win32_perl )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No tests with sh scripts on Windows";
return;
}
else
{
......@@ -393,6 +404,8 @@ sub collect_one_test_case($$$$$$$) {
if ( $::glob_win32_perl )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No tests with sh scripts on Windows";
return;
}
else
{
......@@ -411,18 +424,34 @@ sub collect_one_test_case($$$$$$$) {
}
# FIXME why this late?
my $marked_as_disabled= 0;
if ( $disabled->{$tname} )
{
$tinfo->{'skip'}= 1;
$tinfo->{'disable'}= 1; # Sub type of 'skip'
$tinfo->{'comment'}= $disabled->{$tname} if $disabled->{$tname};
$marked_as_disabled= 1;
$tinfo->{'comment'}= $disabled->{$tname};
}
if ( -f $disabled_file )
{
$marked_as_disabled= 1;
$tinfo->{'comment'}= mtr_fromfile($disabled_file);
}
# If test was marked as disabled, either opt_enable_disabled is off and then
# we skip this test, or it is on and then we run this test but warn
if ( $marked_as_disabled )
{
if ( $::opt_enable_disabled )
{
$tinfo->{'dont_skip_though_disabled'}= 1;
}
else
{
$tinfo->{'skip'}= 1;
$tinfo->{'disable'}= 1; # Sub type of 'skip'
$tinfo->{'comment'}= mtr_fromfile($disabled_file);
return;
}
}
if ( $component_id eq 'im' )
......@@ -430,26 +459,60 @@ sub collect_one_test_case($$$$$$$) {
if ( $::glob_use_embedded_server )
{
$tinfo->{'skip'}= 1;
mtr_report(
"Instance Manager tests are not available in embedded mode. " .
"Test case '$tname' is skipped.");
$tinfo->{'comment'}= "No IM with embedded server";
return;
}
elsif ( $::opt_ps_protocol )
{
$tinfo->{'skip'}= 1;
mtr_report(
"Instance Manager tests are not run with --ps-protocol. " .
"Test case '$tname' is skipped.");
$tinfo->{'comment'}= "No IM with --ps-protocol";
return;
}
elsif ( $::opt_skip_im )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "No IM tests(--skip-im)";
return;
}
}
else
{
mtr_options_from_test_file($tinfo,"$testdir/${tname}.test");
mtr_report(
"Instance Manager executable is unavailable." .
"Test case '$tname' is skipped.");
if ( $tinfo->{'big_test'} and ! $::opt_big_test )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Test need 'big-test' option";
return;
}
if ( $tinfo->{'ndb_extra'} and ! $::opt_ndb_extra_test )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Test need 'ndb_extra' option";
return;
}
if ( $tinfo->{'require_manager'} )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Test need the _old_ manager(to be removed)";
return;
}
if ( defined $tinfo->{'binlog_format'} and
! ( $tinfo->{'binlog_format'} eq $::used_binlog_format ) )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Not running with binlog format '$tinfo->{'binlog_format'}'";
return;
}
if ( $tinfo->{'need_debug'} && ! $::debug_compiled_binaries )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Test need debug binaries";
return;
}
}
......@@ -459,8 +522,58 @@ sub collect_one_test_case($$$$$$$) {
( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) )
{
$tinfo->{'skip'}= 1;
$tinfo->{'comment'}= "Can't restart a running server";
return;
}
}
# List of tags in the .test files that if found should set
# the specified value in "tinfo"
our @tags=
(
["include/have_innodb.inc", "innodb_test", 1],
["include/have_binlog_format_row.inc", "binlog_format", "row"],
["include/have_binlog_format_statement.inc", "binlog_format", "stmt"],
["include/big_test.inc", "big_test", 1],
["include/have_debug.inc", "need_debug", 1],
["include/have_ndb_extra.inc", "ndb_extra", 1],
["require_manager", "require_manager", 1],
);
sub mtr_options_from_test_file($$) {
my $tinfo= shift;
my $file= shift;
#mtr_verbose("$file");
my $F= IO::File->new($file) or mtr_error("can't open file \"$file\": $!");
while ( my $line= <$F> )
{
next if ( $line !~ /^--/ );
# Match this line against tag in "tags" array
foreach my $tag (@tags)
{
if ( index($line, $tag->[0]) >= 0 )
{
# Tag matched, assign value to "tinfo"
$tinfo->{"$tag->[1]"}= $tag->[2];
}
}
# If test sources another file, open it as well
if ( $line =~ /^\-\-([[:space:]]*)source(.*)$/ )
{
my $value= $2;
$value =~ s/^\s+//; # Remove leading space
$value =~ s/[[:space:]]+$//; # Remove ending space
my $sourced_file= "$::glob_mysql_test_dir/$value";
mtr_options_from_test_file($tinfo, $sourced_file);
}
}
}
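# Editor's sketch (hypothetical test file contents, not from this commit):
# how the tag table above turns "--source include/..." lines into
# settings in $tinfo.
use strict;
use warnings;
my @example_tags=
  (
   ["include/have_innodb.inc", "innodb_test", 1],
   ["include/big_test.inc",    "big_test",    1],
  );
my @example_test_lines=
  ("--source include/have_innodb.inc\n",
   "--source include/big_test.inc\n",
   "SELECT 1;\n");
my %example_tinfo;
foreach my $line ( @example_test_lines )
{
  next if $line !~ /^--/;
  foreach my $tag ( @example_tags )
  {
    $example_tinfo{$tag->[1]}= $tag->[2] if index($line, $tag->[0]) >= 0;
  }
}
# After the loop innodb_test=1 and big_test=1 are set; the plain SQL
# line is skipped because it does not start with "--".
print "$_=$example_tinfo{$_}\n" for sort keys %example_tinfo;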
1;
# -*- cperl -*-
# This is a library file used by the Perl version of mysql-test-run,
# and is part of the translation of the Bourne shell script with the
# same name.
use strict;
# Private IM-related operations.
sub mtr_im_kill_process ($$$$);
sub mtr_im_load_pids ($);
sub mtr_im_terminate ($);
sub mtr_im_check_alive ($);
sub mtr_im_check_main_alive ($);
sub mtr_im_check_angel_alive ($);
sub mtr_im_check_mysqlds_alive ($);
sub mtr_im_check_mysqld_alive ($);
sub mtr_im_cleanup ($);
sub mtr_im_rm_file ($);
sub mtr_im_errlog ($);
sub mtr_im_kill ($);
sub mtr_im_wait_for_connection ($$$);
sub mtr_im_wait_for_mysqld($$$);
# Public IM-related operations.
sub mtr_im_start ($$);
sub mtr_im_stop ($);
##############################################################################
#
# Private operations.
#
##############################################################################
sub mtr_im_kill_process ($$$$) {
my $pid_lst= shift;
my $signal= shift;
my $total_retries= shift;
my $timeout= shift;
my %pids;
foreach my $pid ( @{$pid_lst} )
{
$pids{$pid}= 1;
}
for ( my $cur_attempt= 1; $cur_attempt <= $total_retries; ++$cur_attempt )
{
foreach my $pid ( keys %pids )
{
mtr_debug("Sending $signal to $pid...");
kill($signal, $pid);
unless ( kill (0, $pid) )
{
mtr_debug("Process $pid died.");
delete $pids{$pid};
}
}
return if scalar keys %pids == 0;
mtr_debug("Sleeping $timeout second(s) waiting for processes to die...");
sleep($timeout);
}
mtr_debug("Process(es) " .
join(' ', keys %pids) .
" is still alive after $total_retries " .
"of sending signal $signal.");
}
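# Editor's sketch (standalone, not from this commit): the 'kill 0' probe
# that the retry loop above relies on. Signal 0 checks for process
# existence without actually signalling it, which is how a PID gets
# dropped from %pids once the process has gone away.
use strict;
use warnings;
my $probe_pid= fork();
die "fork failed: $!" unless defined $probe_pid;
if ( $probe_pid == 0 ) { sleep 1; exit 0; }   # child lives briefly
print "child $probe_pid is alive\n" if kill(0, $probe_pid);
waitpid($probe_pid, 0);                       # reap it after it exits
print "child $probe_pid is gone\n" unless kill(0, $probe_pid);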
###########################################################################
sub mtr_im_load_pids($) {
my $im= shift;
mtr_debug("Loading PID files...");
# Obtain mysqld-process pids.
my $instances = $im->{'instances'};
for ( my $idx= 0; $idx < 2; ++$idx )
{
mtr_debug("IM-guarded mysqld[$idx] PID file: '" .
$instances->[$idx]->{'path_pid'} . "'.");
my $mysqld_pid;
if ( -r $instances->[$idx]->{'path_pid'} )
{
$mysqld_pid= mtr_get_pid_from_file($instances->[$idx]->{'path_pid'});
mtr_debug("IM-guarded mysqld[$idx] PID: $mysqld_pid.");
}
else
{
$mysqld_pid= undef;
mtr_debug("IM-guarded mysqld[$idx]: no PID file.");
}
$instances->[$idx]->{'pid'}= $mysqld_pid;
}
# Re-read Instance Manager PIDs from the file, since during tests Instance
# Manager could have been restarted, so its PIDs could have been changed.
# - IM-main
mtr_debug("IM-main PID file: '$im->{path_pid}'.");
if ( -f $im->{'path_pid'} )
{
$im->{'pid'} =
mtr_get_pid_from_file($im->{'path_pid'});
mtr_debug("IM-main PID: $im->{pid}.");
}
else
{
mtr_debug("IM-main: no PID file.");
$im->{'pid'}= undef;
}
# - IM-angel
mtr_debug("IM-angel PID file: '$im->{path_angel_pid}'.");
if ( -f $im->{'path_angel_pid'} )
{
$im->{'angel_pid'} =
mtr_get_pid_from_file($im->{'path_angel_pid'});
mtr_debug("IM-angel PID: $im->{'angel_pid'}.");
}
else
{
mtr_debug("IM-angel: no PID file.");
$im->{'angel_pid'} = undef;
}
}
###########################################################################
sub mtr_im_terminate($) {
my $im= shift;
# Load pids from pid-files. We should do it first of all, because IM deletes
# them on shutdown.
mtr_im_load_pids($im);
mtr_debug("Shutting Instance Manager down...");
# Ignoring SIGCHLD so that all children could rest in peace.
start_reap_all();
# Send SIGTERM to IM-main.
if ( defined $im->{'pid'} )
{
mtr_debug("IM-main pid: $im->{pid}.");
mtr_debug("Stopping IM-main...");
mtr_im_kill_process([ $im->{'pid'} ], 'TERM', 10, 1);
}
else
{
mtr_debug("IM-main pid: n/a.");
}
# If IM-angel was alive, wait for it to die.
if ( defined $im->{'angel_pid'} )
{
mtr_debug("IM-angel pid: $im->{'angel_pid'}.");
mtr_debug("Waiting for IM-angel to die...");
my $total_attempts= 10;
for ( my $cur_attempt=1; $cur_attempt <= $total_attempts; ++$cur_attempt )
{
unless ( kill (0, $im->{'angel_pid'}) )
{
mtr_debug("IM-angel died.");
last;
}
sleep(1);
}
}
else
{
mtr_debug("IM-angel pid: n/a.");
}
stop_reap_all();
# Re-load PIDs.
mtr_im_load_pids($im);
}
###########################################################################
sub mtr_im_check_alive($) {
my $im= shift;
mtr_debug("Checking whether IM-components are alive...");
return 1 if mtr_im_check_main_alive($im);
return 1 if mtr_im_check_angel_alive($im);
return 1 if mtr_im_check_mysqlds_alive($im);
return 0;
}
###########################################################################
sub mtr_im_check_main_alive($) {
my $im= shift;
# Check that the process, that we know to be IM's, is dead.
if ( defined $im->{'pid'} )
{
if ( kill (0, $im->{'pid'}) )
{
mtr_debug("IM-main (PID: $im->{pid}) is alive.");
return 1;
}
else
{
mtr_debug("IM-main (PID: $im->{pid}) is dead.");
}
}
else
{
mtr_debug("No PID file for IM-main.");
}
# Check that IM does not accept client connections.
if ( mtr_ping_port($im->{'port'}) )
{
mtr_debug("IM-main (port: $im->{port}) " .
"is accepting connections.");
mtr_im_errlog("IM-main is accepting connections on port " .
"$im->{port}, but there is no " .
"process information.");
return 1;
}
else
{
mtr_debug("IM-main (port: $im->{port}) " .
"does not accept connections.");
return 0;
}
}
###########################################################################
sub mtr_im_check_angel_alive($) {
my $im= shift;
# Check that the process, that we know to be the Angel, is dead.
if ( defined $im->{'angel_pid'} )
{
if ( kill (0, $im->{'angel_pid'}) )
{
mtr_debug("IM-angel (PID: $im->{angel_pid}) is alive.");
return 1;
}
else
{
mtr_debug("IM-angel (PID: $im->{angel_pid}) is dead.");
return 0;
}
}
else
{
mtr_debug("No PID file for IM-angel.");
return 0;
}
}
###########################################################################
sub mtr_im_check_mysqlds_alive($) {
my $im= shift;
mtr_debug("Checking for IM-guarded mysqld instances...");
my $instances = $im->{'instances'};
for ( my $idx= 0; $idx < 2; ++$idx )
{
mtr_debug("Checking mysqld[$idx]...");
return 1
if mtr_im_check_mysqld_alive($instances->[$idx]);
}
}
###########################################################################
sub mtr_im_check_mysqld_alive($) {
my $mysqld_instance= shift;
# Check that the process is dead.
if ( defined $mysqld_instance->{'pid'} )
{
if ( kill (0, $mysqld_instance->{'pid'}) )
{
mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is alive.");
return 1;
}
else
{
mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is dead.");
}
}
else
{
mtr_debug("No PID file for mysqld instance.");
}
# Check that mysqld does not accept client connections.
if ( mtr_ping_port($mysqld_instance->{'port'}) )
{
mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " .
"is accepting connections.");
mtr_im_errlog("Mysqld is accepting connections on port " .
"$mysqld_instance->{port}, but there is no " .
"process information.");
return 1;
}
else
{
mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " .
"does not accept connections.");
return 0;
}
}
###########################################################################
sub mtr_im_cleanup($) {
my $im= shift;
mtr_im_rm_file($im->{'path_pid'});
mtr_im_rm_file($im->{'path_sock'});
mtr_im_rm_file($im->{'path_angel_pid'});
for ( my $idx= 0; $idx < 2; ++$idx )
{
mtr_im_rm_file($im->{'instances'}->[$idx]->{'path_pid'});
mtr_im_rm_file($im->{'instances'}->[$idx]->{'path_sock'});
}
}
###########################################################################
sub mtr_im_rm_file($)
{
my $file_path= shift;
if ( -f $file_path )
{
mtr_debug("Removing '$file_path'...");
unless ( unlink($file_path) )
{
mtr_warning("Can not remove '$file_path'.")
}
}
else
{
mtr_debug("File '$file_path' does not exist already.");
}
}
###########################################################################
sub mtr_im_errlog($) {
my $msg= shift;
# Complain in error log so that a warning will be shown.
#
# TODO: unless BUG#20761 is fixed, we will print the warning to stdout, so
# that it can be seen on console and does not produce pushbuild error.
# my $errlog= "$opt_vardir/log/mysql-test-run.pl.err";
#
# open (ERRLOG, ">>$errlog") ||
# mtr_error("Can not open error log ($errlog)");
#
# my $ts= localtime();
# print ERRLOG
# "Warning: [$ts] $msg\n";
#
# close ERRLOG;
my $ts= localtime();
print "Warning: [$ts] $msg\n";
}
###########################################################################
sub mtr_im_kill($) {
my $im= shift;
# Re-load PIDs. That can be useful because some processes could have been
# restarted.
mtr_im_load_pids($im);
# Ignoring SIGCHLD so that all children could rest in peace.
start_reap_all();
# Kill IM-angel first of all.
if ( defined $im->{'angel_pid'} )
{
mtr_debug("Killing IM-angel (PID: $im->{angel_pid})...");
mtr_im_kill_process([ $im->{'angel_pid'} ], 'KILL', 10, 1)
}
else
{
mtr_debug("IM-angel is dead.");
}
# Re-load PIDs again.
mtr_im_load_pids($im);
# Kill IM-main.
if ( defined $im->{'pid'} )
{
mtr_debug("Killing IM-main (PID: $im->pid})...");
mtr_im_kill_process([ $im->{'pid'} ], 'KILL', 10, 1);
}
else
{
mtr_debug("IM-main is dead.");
}
# Re-load PIDs again.
mtr_im_load_pids($im);
# Kill guarded mysqld instances.
my @mysqld_pids;
mtr_debug("Collecting PIDs of mysqld instances to kill...");
for ( my $idx= 0; $idx < 2; ++$idx )
{
my $pid= $im->{'instances'}->[$idx]->{'pid'};
unless ( defined $pid )
{
next;
}
mtr_debug(" - IM-guarded mysqld[$idx] PID: $pid.");
push (@mysqld_pids, $pid);
}
if ( scalar @mysqld_pids > 0 )
{
mtr_debug("Killing IM-guarded mysqld instances...");
mtr_im_kill_process(\@mysqld_pids, 'KILL', 10, 1);
}
# That's all.
stop_reap_all();
}
##############################################################################
sub mtr_im_wait_for_connection($$$) {
my $im= shift;
my $total_attempts= shift;
my $connect_timeout= shift;
mtr_debug("Waiting for IM on port $im->{port} " .
"to start accepting connections...");
for ( my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt )
{
mtr_debug("Trying to connect to IM ($cur_attempt of $total_attempts)...");
if ( mtr_ping_port($im->{'port'}) )
{
mtr_debug("IM is accepting connections " .
"on port $im->{port}.");
return 1;
}
mtr_debug("Sleeping $connect_timeout...");
sleep($connect_timeout);
}
mtr_debug("IM does not accept connections " .
"on port $im->{port} after " .
($total_attempts * $connect_timeout) . " seconds.");
return 0;
}
##############################################################################
sub mtr_im_wait_for_mysqld($$$) {
my $mysqld= shift;
my $total_attempts= shift;
my $connect_timeout= shift;
mtr_debug("Waiting for IM-guarded mysqld on port $mysqld->{port} " .
"to start accepting connections...");
for ( my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt )
{
mtr_debug("Trying to connect to mysqld " .
"($cur_attempt of $total_attempts)...");
if ( mtr_ping_port($mysqld->{'port'}) )
{
mtr_debug("Mysqld is accepting connections " .
"on port $mysqld->{port}.");
return 1;
}
mtr_debug("Sleeping $connect_timeout...");
sleep($connect_timeout);
}
mtr_debug("Mysqld does not accept connections " .
"on port $mysqld->{port} after " .
($total_attempts * $connect_timeout) . " seconds.");
return 0;
}
##############################################################################
#
# Public operations.
#
##############################################################################
sub mtr_im_start($$) {
my $im = shift;
my $opts = shift;
mtr_debug("Starting Instance Manager...");
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--defaults-file=%s", $im->{'defaults_file'});
foreach my $opt ( @{$opts} )
{
mtr_add_arg($args, $opt);
}
$im->{'pid'} =
mtr_spawn(
$::exe_im, # path to the executable
$args, # cmd-line args
'', # stdin
$im->{'path_log'}, # stdout
$im->{'path_err'}, # stderr
'', # pid file path (not used)
{ append_log_file => 1 } # append log files
);
unless ( $im->{'pid'} )
{
mtr_error('Could not start Instance Manager.')
}
# Instance Manager can be run in daemon mode. In this case, it creates
# several processes and the parent process, created by mtr_spawn(), exits just
# after start. So, we have to obtain Instance Manager PID from the PID file.
mtr_debug("Waiting for IM to create PID file (" .
"path: '$im->{path_pid}'; " .
"timeout: $im->{start_timeout})...");
unless ( sleep_until_file_created($im->{'path_pid'},
$im->{'start_timeout'},
-1) ) # real PID is still unknown
{
mtr_debug("IM has not created PID file in $im->{start_timeout} secs.");
mtr_debug("Aborting test suite...");
mtr_kill_leftovers();
mtr_report("IM has not created PID file in $im->{start_timeout} secs.");
return 0;
}
$im->{'pid'}= mtr_get_pid_from_file($im->{'path_pid'});
mtr_debug("Instance Manager started. PID: $im->{pid}.");
# Wait until we can connect to IM.
my $IM_CONNECT_TIMEOUT= 30;
unless ( mtr_im_wait_for_connection($im,
$IM_CONNECT_TIMEOUT, 1) )
{
mtr_debug("Can not connect to Instance Manager " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
mtr_debug("Aborting test suite...");
mtr_kill_leftovers();
mtr_report("Can not connect to Instance Manager " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
return 0;
}
# Wait for IM to start guarded instances:
# - wait for PID files;
mtr_debug("Waiting for guarded mysqlds instances to create PID files...");
for ( my $idx= 0; $idx < 2; ++$idx )
{
my $mysqld= $im->{'instances'}->[$idx];
if ( exists $mysqld->{'nonguarded'} )
{
next;
}
mtr_debug("Waiting for mysqld[$idx] to create PID file (" .
"path: '$mysqld->{path_pid}'; " .
"timeout: $mysqld->{start_timeout})...");
unless ( sleep_until_file_created($mysqld->{'path_pid'},
$mysqld->{'start_timeout'},
-1) ) # real PID is still unknown
{
mtr_debug("mysqld[$idx] has not created PID file in " .
"$mysqld->{start_timeout} secs.");
mtr_debug("Aborting test suite...");
mtr_kill_leftovers();
mtr_report("mysqld[$idx] has not created PID file in " .
"$mysqld->{start_timeout} secs.");
return 0;
}
mtr_debug("PID file for mysqld[$idx] ($mysqld->{path_pid} created.");
}
# Wait until we can connect to guarded mysqld-instances
# (in other words -- wait for IM to start guarded instances).
mtr_debug("Waiting for guarded mysqlds to start accepting connections...");
for ( my $idx= 0; $idx < 2; ++$idx )
{
my $mysqld= $im->{'instances'}->[$idx];
if ( exists $mysqld->{'nonguarded'} )
{
next;
}
mtr_debug("Waiting for mysqld[$idx] to accept connection...");
unless ( mtr_im_wait_for_mysqld($mysqld, 30, 1) )
{
mtr_debug("Can not connect to mysqld[$idx] " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
mtr_debug("Aborting test suite...");
mtr_kill_leftovers();
mtr_report("Can not connect to mysqld[$idx] " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
return 0;
}
mtr_debug("mysqld[$idx] started.");
}
mtr_debug("Instance Manager and its components are up and running.");
return 1;
}
##############################################################################
sub mtr_im_stop($) {
my $im= shift;
mtr_debug("Stopping Instance Manager...");
# Try graceful shutdown.
mtr_im_terminate($im);
# Check that all processes died.
unless ( mtr_im_check_alive($im) )
{
mtr_debug("Instance Manager has been stopped successfully.");
mtr_im_cleanup($im);
return 1;
}
# Instance Manager doesn't want to die. We should kill it.
mtr_im_errlog("Instance Manager did not shutdown gracefully.");
mtr_im_kill($im);
# Check again that all IM-related processes have been killed.
my $im_is_alive= mtr_im_check_alive($im);
mtr_im_cleanup($im);
if ( $im_is_alive )
{
mtr_debug("Can not kill Instance Manager or its children.");
return 0;
}
mtr_debug("Instance Manager has been killed successfully.");
return 1;
}
###########################################################################
1;
......@@ -11,6 +11,8 @@ sub mtr_get_opts_from_file ($);
sub mtr_fromfile ($);
sub mtr_tofile ($@);
sub mtr_tonewfile($@);
sub mtr_lastlinefromfile($);
sub mtr_appendfile_to_file ($$);
##############################################################################
#
......@@ -139,6 +141,20 @@ sub mtr_fromfile ($) {
return $text;
}
sub mtr_lastlinefromfile ($) {
my $file= shift;
my $text;
open(FILE,"<",$file) or mtr_error("can't open file \"$file\": $!");
while (my $line= <FILE>)
{
$text= $line;
}
close FILE;
return $text;
}
sub mtr_tofile ($@) {
my $file= shift;
......@@ -155,5 +171,17 @@ sub mtr_tonewfile ($@) {
close FILE;
}
sub mtr_appendfile_to_file ($$) {
my $from_file= shift;
my $to_file= shift;
open(TOFILE,">>",$to_file) or mtr_error("can't open file \"$to_file\": $!");
open(FROMFILE,">>",$from_file)
or mtr_error("can't open file \"$from_file\": $!");
print TOFILE while (<FROMFILE>);
close FROMFILE;
close TOFILE;
}
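# Editor's sketch (temporary files, not from this commit): the append
# idiom used by mtr_appendfile_to_file above, reading the source file
# and appending each line onto the destination.
use strict;
use warnings;
use File::Temp qw(tempfile);
my ($src_fh, $src)= tempfile(); print $src_fh "line from source\n"; close $src_fh;
my ($dst_fh, $dst)= tempfile(); print $dst_fh "existing line\n";    close $dst_fh;
open(my $TO, ">>", $dst)  or die "can't open $dst: $!";
open(my $FROM, "<", $src) or die "can't open $src: $!";
print $TO $_ while <$FROM>;
close $FROM;
close $TO;
# $dst now contains "existing line" followed by "line from source".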
1;
......@@ -139,6 +139,8 @@ sub mtr_copy_dir($$) {
my $from_dir= shift;
my $to_dir= shift;
# mtr_verbose("Copying from $from_dir to $to_dir");
mkpath("$to_dir");
opendir(DIR, "$from_dir")
or mtr_error("Can't find $from_dir$!");
......
......@@ -4,7 +4,6 @@
# and is part of the translation of the Bourne shell script with the
# same name.
#use Carp qw(cluck);
use Socket;
use Errno;
use strict;
......@@ -14,35 +13,17 @@ use POSIX 'WNOHANG';
sub mtr_run ($$$$$$;$);
sub mtr_spawn ($$$$$$;$);
sub mtr_stop_mysqld_servers ($);
sub mtr_check_stop_servers ($);
sub mtr_kill_leftovers ();
sub mtr_wait_blocking ($);
sub mtr_record_dead_children ();
sub mtr_ndbmgm_start($$);
sub mtr_mysqladmin_start($$$);
sub mtr_exit ($);
sub sleep_until_file_created ($$$);
sub mtr_kill_processes ($);
sub mtr_ping_mysqld_server ($);
# Private IM-related operations.
sub mtr_im_kill_process ($$$$);
sub mtr_im_load_pids ($);
sub mtr_im_terminate ($);
sub mtr_im_check_alive ($);
sub mtr_im_check_main_alive ($);
sub mtr_im_check_angel_alive ($);
sub mtr_im_check_mysqlds_alive ($);
sub mtr_im_check_mysqld_alive ($$);
sub mtr_im_cleanup ($);
sub mtr_im_rm_file ($);
sub mtr_im_errlog ($);
sub mtr_im_kill ($);
sub mtr_im_wait_for_connection ($$$);
sub mtr_im_wait_for_mysqld($$$);
# Public IM-related operations.
sub mtr_im_start ($$);
sub mtr_im_stop ($);
sub mtr_ping_with_timeout($);
sub mtr_ping_port ($);
# static in C
sub spawn_impl ($$$$$$$$);
......@@ -54,7 +35,6 @@ sub spawn_impl ($$$$$$$$);
##############################################################################
# This function try to mimic the C version used in "netware/mysql_test_run.c"
# FIXME learn it to handle append mode as well, a "new" flag or a "append"
sub mtr_run ($$$$$$;$) {
my $path= shift;
......@@ -135,6 +115,9 @@ sub spawn_impl ($$$$$$$$) {
print STDERR "#### ", "-" x 78, "\n";
}
mtr_error("Can't spawn with empty \"path\"") unless defined $path;
FORK:
{
my $pid= fork();
......@@ -167,17 +150,6 @@ sub spawn_impl ($$$$$$$$) {
$SIG{INT}= 'DEFAULT'; # Parent do some stuff, we don't
if ( $::glob_cygwin_shell and $mode eq 'test' )
{
# Programs started from mysqltest under Cygwin, are to
# execute them within Cygwin. Else simple things in test
# files like
# --system "echo 1 > file"
# will fail.
# FIXME not working :-(
# $ENV{'COMSPEC'}= "$::glob_cygwin_shell -c";
}
my $log_file_open_mode = '>';
if ($spawn_opts and $spawn_opts->{'append_log_file'})
......@@ -187,7 +159,15 @@ sub spawn_impl ($$$$$$$$) {
if ( $output )
{
if ( ! open(STDOUT,$log_file_open_mode,$output) )
if ( $::glob_win32_perl )
{
# Don't redirect stdout on ActiveState perl since this is
# just another thread in the same process.
# Should be fixed so that the thread that is created with fork
# executes the exe in another process and waits for it to return.
# In the meantime, we get all the output from the mysqlds to the screen
}
elsif ( ! open(STDOUT,$log_file_open_mode,$output) )
{
mtr_child_error("can't redirect STDOUT to \"$output\": $!");
}
......@@ -239,8 +219,7 @@ sub spawn_parent_impl {
{
# Simple run of command, we wait for it to return
my $ret_pid= waitpid($pid,0);
if ( $ret_pid <= 0 )
if ( $ret_pid != $pid )
{
mtr_error("$path ($pid) got lost somehow");
}
......@@ -268,7 +247,6 @@ sub spawn_parent_impl {
# Someone terminated, don't know who. Collect
# status info first before $? is lost,
# but not $exit_value, this is flagged from
#
my $timer_name= mtr_timer_timeout($::glob_timers, $ret_pid);
if ( $timer_name )
......@@ -295,45 +273,22 @@ sub spawn_parent_impl {
last;
}
# If one of the mysqld processes died, we want to
# mark this, and kill the mysqltest process.
foreach my $idx (0..1)
{
if ( $::master->[$idx]->{'pid'} eq $ret_pid )
{
mtr_debug("child $ret_pid was master[$idx], " .
"exit during mysqltest run");
$::master->[$idx]->{'pid'}= 0;
last;
}
}
foreach my $idx (0..2)
{
if ( $::slave->[$idx]->{'pid'} eq $ret_pid )
{
mtr_debug("child $ret_pid was slave[$idx], " .
"exit during mysqltest run");
$::slave->[$idx]->{'pid'}= 0;
last;
}
}
# One of the child processes died; unless this was expected,
# mysqltest should be killed and the test aborted
mtr_debug("waitpid() catched exit of unknown child $ret_pid, " .
"exit during mysqltest run");
check_expected_crash_and_restart($ret_pid);
}
if ( $ret_pid != $pid )
{
# We terminated the waiting because a "mysqld" process died.
# Kill the mysqltest process.
mtr_verbose("Kill mysqltest because another process died");
kill(9,$pid);
$ret_pid= waitpid($pid,0);
if ( $ret_pid == -1 )
if ( $ret_pid != $pid )
{
mtr_error("$path ($pid) got lost somehow");
}
......@@ -374,64 +329,101 @@ sub mtr_process_exit_status {
#
##############################################################################
# We just "ping" on the ports, and if we can't do a socket connect
# we assume the server is dead. So we don't *really* know a server
# is dead, we just hope that after letting the listen port go,
# it is dead enough for us to start a new server.
# Kill all processes (mysqld, ndbd, ndb_mgmd and im) that would conflict
# with this run.
# Make sure to remove the PID file, if any.
# Kill the IM manager first, else it will restart the servers
sub mtr_kill_leftovers () {
mtr_report("Killing Possible Leftover Processes");
mtr_debug("mtr_kill_leftovers(): started.");
mtr_im_stop($::instance_manager);
mkpath("$::opt_vardir/log"); # Needed for mysqladmin log
# Kill mysqld servers (masters and slaves) that would conflict with this
# run. Make sure to remove the PID file, if any.
# Don't touch IM-managed mysqld instances -- they should be stopped by
# mtr_im_stop().
# Stop or kill Instance Manager and all its children. If we failed to do
# that, we can only abort -- there is nothing left to do.
mtr_debug("Collecting mysqld-instances to shutdown...");
# mtr_error("Failed to stop Instance Manager.")
# unless mtr_im_stop($::instance_manager);
my @args;
# Start shutdown of masters and slaves. Don't touch IM-managed mysqld
# instances -- they should be stopped by mtr_im_stop().
for ( my $idx= 0; $idx < 2; $idx++ )
mtr_debug("Shutting down mysqld-instances...");
my @kill_pids;
my %admin_pids;
foreach my $srv (@{$::master}, @{$::slave})
{
my $pidfile= $::master->[$idx]->{'path_mypid'};
my $sockfile= $::master->[$idx]->{'path_mysock'};
my $port= $::master->[$idx]->{'path_myport'};
mtr_debug(" - mysqld " .
"(pid: $srv->{pid}; " .
"pid file: '$srv->{path_pid}'; " .
"socket: '$srv->{path_sock}'; ".
"port: $srv->{port})");
push(@args,{
pid => 0, # We don't know the PID
pidfile => $pidfile,
sockfile => $sockfile,
port => $port,
});
my $pid= mtr_mysqladmin_start($srv, "shutdown", 70);
# Save the pid of the mysqladmin process
$admin_pids{$pid}= 1;
mtr_debug(" - Master mysqld " .
"(idx: $idx; pid: '$pidfile'; socket: '$sockfile'; port: $port)");
$::master->[$idx]->{'pid'}= 0; # Assume we are done with it
push(@kill_pids,{
pid => $srv->{'pid'},
pidfile => $srv->{'path_pid'},
sockfile => $srv->{'path_sock'},
port => $srv->{'port'},
});
$srv->{'pid'}= 0; # Assume we are done with it
}
for ( my $idx= 0; $idx < 3; $idx++ )
if ( ! $::opt_skip_ndbcluster )
{
# Start shutdown of clusters.
mtr_debug("Shutting down cluster...");
foreach my $cluster (@{$::clusters})
{
my $pidfile= $::slave->[$idx]->{'path_mypid'};
my $sockfile= $::slave->[$idx]->{'path_mysock'};
my $port= $::slave->[$idx]->{'path_myport'};
mtr_debug(" - cluster " .
"(pid: $cluster->{pid}; " .
"pid file: '$cluster->{path_pid})");
my $pid= mtr_ndbmgm_start($cluster, "shutdown");
push(@args,{
pid => 0, # We don't know the PID
pidfile => $pidfile,
sockfile => $sockfile,
port => $port,
# Save the pid of the ndb_mgm process
$admin_pids{$pid}= 1;
push(@kill_pids,{
pid => $cluster->{'pid'},
pidfile => $cluster->{'path_pid'}
});
mtr_debug(" - Slave mysqld " .
"(idx: $idx; pid: '$pidfile'; socket: '$sockfile'; port: $port)");
$cluster->{'pid'}= 0; # Assume we are done with it
foreach my $ndbd (@{$cluster->{'ndbds'}})
{
mtr_debug(" - ndbd " .
"(pid: $ndbd->{pid}; " .
"pid file: '$ndbd->{path_pid})");
$::slave->[$idx]->{'pid'}= 0; # Assume we are done with it
push(@kill_pids,{
pid => $ndbd->{'pid'},
pidfile => $ndbd->{'path_pid'},
});
$ndbd->{'pid'}= 0; # Assume we are done with it
}
}
}
# Wait for all the admin processes to complete
mtr_wait_blocking(\%admin_pids);
# If we trusted "mysqladmin --shutdown_timeout= ..." we could just
# terminate now, but we don't (FIXME should be debugged).
# So we try again to ping and at least wait the same amount of time
# mysqladmin would for all to die.
mtr_mysqladmin_shutdown(\@args, 20);
mtr_ping_with_timeout(\@kill_pids);
# We now have tried to terminate nice. We have waited for the listen
# port to be free, but can't really tell if the mysqld process died
......@@ -470,14 +462,6 @@ sub mtr_kill_leftovers () {
mtr_debug("Got pid: $pid from file '$pidfile'");
# Race, could have been removed between I tested with -f
# and the unlink() below, so I better check again with -f
if ( ! unlink($pidfile) and -f $pidfile )
{
mtr_error("can't remove $pidfile");
}
if ( $::glob_cygwin_perl or kill(0, $pid) )
{
mtr_debug("There is process with pid $pid -- scheduling for kill.");
......@@ -513,7 +497,7 @@ sub mtr_kill_leftovers () {
{
mtr_debug("Sending SIGKILL to pids: " . join(' ', @pids));
kill(9, @pids);
mtr_debug("Sleep 1 second waiting for processes to die");
mtr_report("Sleep 1 second waiting for processes to die");
sleep(1) # Wait one second
} while ( $retries-- and kill(0, @pids) );
......@@ -531,58 +515,66 @@ sub mtr_kill_leftovers () {
mtr_debug("Directory for PID files ($rundir) does not exist.");
}
# We may have failed everything, bug we now check again if we have
# We may have failed everything, but we now check again if we have
# the listen ports free to use, and if they are free, just go for it.
mtr_debug("Checking known mysqld servers...");
foreach my $srv ( @args )
foreach my $srv ( @kill_pids )
{
if ( mtr_ping_mysqld_server($srv->{'port'}) )
if ( defined $srv->{'port'} and mtr_ping_port($srv->{'port'}) )
{
mtr_warning("can't kill old mysqld holding port $srv->{'port'}");
mtr_warning("can't kill old process holding port $srv->{'port'}");
}
}
mtr_debug("mtr_kill_leftovers(): finished.");
}
##############################################################################
#
# Shut down mysqld servers we have started from this run of this script
#
##############################################################################
# To speed things we kill servers in parallel. The argument is a list
# of 'ports', 'pids', 'pidfiles' and 'socketfiles'.
# Check that all processes in list are killed
# The argument is a list of 'ports', 'pids', 'pidfiles' and 'socketfiles'
# for which shutdown has been started. Make sure they all get killed
# in one way or the other.
#
# FIXME On Cygwin, and maybe some other platforms, $srv->{'pid'} and
# $srv->{'pidfile'} will not be the same PID. We need to try to kill
# the pid in $srv->{'pidfile'} will not be the same PID. We need to try to kill
# both I think.
sub mtr_stop_mysqld_servers ($) {
sub mtr_check_stop_servers ($) {
my $spec= shift;
# ----------------------------------------------------------------------
# First try nice normal shutdown using 'mysqladmin'
# ----------------------------------------------------------------------
# Return if no processes are defined
return if ! @$spec;
# Shutdown time must be high as slave may be in reconnect
mtr_mysqladmin_shutdown($spec, 70);
#mtr_report("mtr_check_stop_servers");
mtr_ping_with_timeout(\@$spec);
# ----------------------------------------------------------------------
# We loop with waitpid() nonblocking to see how many of the ones we
# are to kill, actually got killed by mtr_mysqladmin_shutdown().
# Note that we don't rely on this, the mysqld server might have stop
# are to kill, actually got killed by mysqladmin or ndb_mgm
#
# Note that we don't rely on this, the mysqld server might have stopped
# listening to the port, but still be alive. But it is a start.
# ----------------------------------------------------------------------
foreach my $srv ( @$spec )
{
if ( $srv->{'pid'} and (waitpid($srv->{'pid'},&WNOHANG) == $srv->{'pid'}) )
my $ret_pid;
if ( $srv->{'pid'} )
{
$ret_pid= waitpid($srv->{'pid'},&WNOHANG);
if ($ret_pid == $srv->{'pid'})
{
mtr_verbose("Caught exit of process $ret_pid");
$srv->{'pid'}= 0;
}
else
{
# mtr_warning("caught exit of unknown child $ret_pid");
}
}
}
# ----------------------------------------------------------------------
......@@ -615,13 +607,12 @@ sub mtr_stop_mysqld_servers ($) {
}
# ----------------------------------------------------------------------
# If the processes where started from this script, and we had no PIDS
# If all the processes in the list have already been killed,
# then we don't have to do anything.
# ----------------------------------------------------------------------
if ( ! keys %mysqld_pids )
{
# cluck "This is how we got here!";
return;
}
......@@ -670,20 +661,30 @@ sub mtr_stop_mysqld_servers ($) {
foreach my $file ($srv->{'pidfile'}, $srv->{'sockfile'})
{
# Know it is dead so should be no race, careful anyway
if ( -f $file and ! unlink($file) and -f $file )
if ( defined $file and -f $file and ! unlink($file) and -f $file )
{
$errors++;
mtr_warning("couldn't delete $file");
}
}
$srv->{'pid'}= 0;
}
}
}
if ( $errors )
{
# We are in trouble, just die....
# There were errors killing processes
# do one last attempt to ping the servers
# and if they can't be pinged, assume they are dead
if ( ! mtr_ping_with_timeout( \@$spec ) )
{
mtr_error("we could not kill or clean up all processes");
}
else
{
mtr_verbose("All ports were free, continuing");
}
}
}
# FIXME We just assume they are all dead, for Cygwin we are not
......@@ -691,102 +692,96 @@ sub mtr_stop_mysqld_servers ($) {
}
# Wait for all the processes in the list to terminate
sub mtr_wait_blocking($) {
my $admin_pids= shift;
##############################################################################
#
# Shut down mysqld servers using "mysqladmin ... shutdown".
# To speed this up, we start them in parallel and use waitpid() to
# catch their termination. Note that this doesn't say the servers
# are terminated, just that 'mysqladmin' is terminated.
#
# Note that mysqladmin will ask the server about what PID file it uses,
# and mysqladmin will wait for it to be removed before it terminates
# (unless passes timeout).
#
# This function will take at most about 20 seconds, and we still are not
# sure we killed them all. If none is responding to ping, we return 1,
# else we return 0.
#
##############################################################################
sub mtr_mysqladmin_shutdown {
my $spec= shift;
my $adm_shutdown_tmo= shift;
my %mysql_admin_pids;
my @to_kill_specs;
# Return if no processes defined
return if ! %$admin_pids;
mtr_debug("mtr_mysqladmin_shutdown(): starting...");
mtr_debug("Collecting mysqld-instances to shutdown...");
mtr_verbose("mtr_wait_blocking");
foreach my $srv ( @$spec )
# Wait for all the started processes to exit
# As mysqladmin is such a simple program, we trust it to terminate itself.
# I.e. we wait blocking, and wait for them all before we go on.
foreach my $pid (keys %{$admin_pids})
{
if ( mtr_ping_mysqld_server($srv->{'port'}) )
{
mtr_debug("Mysqld (port: $srv->{port}) needs to be stopped.");
my $ret_pid= waitpid($pid,0);
push(@to_kill_specs, $srv);
}
}
}
# Start "mysqladmin shutdown" for a specific mysqld
sub mtr_mysqladmin_start($$$) {
my $srv= shift;
my $command= shift;
my $adm_shutdown_tmo= shift;
foreach my $srv ( @to_kill_specs )
{
# Shutdown time must be high as slave may be in reconnect
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--no-defaults");
mtr_add_arg($args, "--user=%s", $::opt_user);
mtr_add_arg($args, "--password=");
if ( -e $srv->{'sockfile'} )
mtr_add_arg($args, "--silent");
if ( -e $srv->{'path_sock'} )
{
mtr_add_arg($args, "--socket=%s", $srv->{'sockfile'});
mtr_add_arg($args, "--socket=%s", $srv->{'path_sock'});
}
if ( $srv->{'port'} )
{
mtr_add_arg($args, "--port=%s", $srv->{'port'});
}
if ( $srv->{'port'} and ! -e $srv->{'sockfile'} )
if ( $srv->{'port'} and ! -e $srv->{'path_sock'} )
{
mtr_add_arg($args, "--protocol=tcp"); # Needed if no --socket
}
mtr_add_arg($args, "--connect_timeout=5");
mtr_add_arg($args, "--shutdown_timeout=$adm_shutdown_tmo");
mtr_add_arg($args, "shutdown");
mtr_debug("Shutting down mysqld " .
"(port: $srv->{port}; socket: '$srv->{sockfile}')...");
# Shutdown time must be high as slave may be in reconnect
mtr_add_arg($args, "--shutdown_timeout=$adm_shutdown_tmo");
mtr_add_arg($args, "$command");
my $path_mysqladmin_log= "$::opt_vardir/log/mysqladmin.log";
my $pid= mtr_spawn($::exe_mysqladmin, $args,
"", $path_mysqladmin_log, $path_mysqladmin_log, "",
{ append_log_file => 1 });
$mysql_admin_pids{$pid}= 1;
mtr_verbose("mtr_mysqladmin_start, pid: $pid");
return $pid;
# We don't wait for termination of mysqladmin
}
}
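# Editor's sketch (example user, socket, port and timeout values, not
# from this commit): roughly the command line that the argument list
# built above expands to for one server.
use strict;
use warnings;
my @example_cmd= ('mysqladmin', '--no-defaults', '--user=root', '--password=',
                  '--silent', '--socket=/tmp/master.sock', '--port=3306',
                  '--connect_timeout=5', '--shutdown_timeout=70', 'shutdown');
print join(' ', @example_cmd), "\n";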
# As mysqladmin is such a simple program, we trust it to terminate.
# I.e. we wait blocking, and wait wait for them all before we go on.
while (keys %mysql_admin_pids)
{
foreach my $pid (keys %mysql_admin_pids)
{
if ( waitpid($pid,0) > 0 )
{
delete $mysql_admin_pids{$pid};
}
}
}
# Start "ndb_mgm shutdown" for a specific cluster, it will
# shutdown all data nodes and leave the ndb_mgmd running
sub mtr_ndbmgm_start($$) {
my $cluster= shift;
my $command= shift;
# If we trusted "mysqladmin --shutdown_timeout= ..." we could just
# terminate now, but we don't (FIXME should be debugged).
# So we try again to ping and at least wait the same amount of time
# mysqladmin would for all to die.
my $args;
mtr_init_args(\$args);
my $timeout= 20; # 20 seconds max
mtr_add_arg($args, "--no-defaults");
mtr_add_arg($args, "--core");
mtr_add_arg($args, "--try-reconnect=1");
mtr_add_arg($args, "--ndb_connectstring=%s", $cluster->{'connect_string'});
mtr_add_arg($args, "-e");
mtr_add_arg($args, "$command");
my $pid= mtr_spawn($::exe_ndb_mgm, $args,
"", "/dev/null", "/dev/null", "",
{});
mtr_verbose("mtr_ndbmgm_start, pid: $pid");
return $pid;
}
# Ping all servers in list, exit when none of them answers
# or when timeout has passed
sub mtr_ping_with_timeout($) {
my $spec= shift;
my $timeout= 200; # 20 seconds max
my $res= 1; # If we just fall through, we are done
# in the sense that the servers don't
# listen to their ports any longer
......@@ -796,69 +791,182 @@ sub mtr_mysqladmin_shutdown {
TIME:
while ( $timeout-- )
{
foreach my $srv ( @to_kill_specs )
foreach my $srv ( @$spec )
{
$res= 1; # We are optimistic
if ( mtr_ping_mysqld_server($srv->{'port'}) )
if ( $srv->{'pid'} and defined $srv->{'port'} )
{
mtr_debug("Mysqld (port: $srv->{port}) is still alive.");
mtr_debug("Sleep 1 second waiting for processes to stop using port");
sleep(1); # One second
if ( mtr_ping_port($srv->{'port'}) )
{
mtr_verbose("waiting for process $srv->{'pid'} to stop ".
"using port $srv->{'port'}");
# Millisecond sleep emulated with select
select(undef, undef, undef, (0.1));
$res= 0;
next TIME;
}
else
{
# Process was not using port
}
}
}
last; # If we got here, we are done
}
if ($res)
{
mtr_debug("mtr_mysqladmin_shutdown(): All mysqld instances are down.");
mtr_debug("mtr_ping_with_timeout(): All mysqld instances are down.");
}
else
{
mtr_debug("mtr_mysqladmin_shutdown(): At least one server is alive.");
mtr_report("mtr_ping_with_timeout(): At least one server is alive.");
}
return $res;
}
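# Editor's sketch (standalone, not from this commit): the sub-second
# sleep idiom used in the ping loop above. Calling select() with three
# undef filehandle sets and a fractional timeout sleeps for roughly that
# many seconds without needing Time::HiRes.
use strict;
use warnings;
for my $attempt ( 1 .. 3 )
{
  print "poll attempt $attempt\n";
  select(undef, undef, undef, 0.1);   # pause roughly 100 milliseconds
}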
##############################################################################
#
# The operating system will keep information about dead children,
# we read this information here, and if we have records the process
# is alive, we mark it as dead.
# Loop through our list of processes and look for an entry
# with the provided pid
# Set the pid of that process to 0 if found
#
##############################################################################
sub mark_process_dead($)
{
my $ret_pid= shift;
sub mtr_record_dead_children () {
foreach my $mysqld (@{$::master}, @{$::slave})
{
if ( $mysqld->{'pid'} eq $ret_pid )
{
mtr_verbose("$mysqld->{'type'} $mysqld->{'idx'} exited, pid: $ret_pid");
$mysqld->{'pid'}= 0;
return;
}
}
my $ret_pid;
foreach my $cluster (@{$::clusters})
{
if ( $cluster->{'pid'} eq $ret_pid )
{
mtr_verbose("$cluster->{'name'} cluster ndb_mgmd exited, pid: $ret_pid");
$cluster->{'pid'}= 0;
return;
}
foreach my $ndbd (@{$cluster->{'ndbds'}})
{
if ( $ndbd->{'pid'} eq $ret_pid )
{
mtr_verbose("$cluster->{'name'} cluster ndbd exited, pid: $ret_pid");
$ndbd->{'pid'}= 0;
return;
}
}
}
mtr_warning("mark_process_dead couldn't find an entry for pid: $ret_pid");
}
#
# Loop through our list of processes and look for an entry
# with the provided pid, if found check for the file indicating
# expected crash and restart it.
#
sub check_expected_crash_and_restart($)
{
my $ret_pid= shift;
foreach my $mysqld (@{$::master}, @{$::slave})
{
if ( $mysqld->{'pid'} eq $ret_pid )
{
mtr_verbose("$mysqld->{'type'} $mysqld->{'idx'} exited, pid: $ret_pid");
$mysqld->{'pid'}= 0;
# Check if crash expected and restart if it was
my $expect_file= "$::opt_vardir/tmp/" . "$mysqld->{'type'}" .
"$mysqld->{'idx'}" . ".expect";
if ( -f $expect_file )
{
mtr_verbose("Crash was expected, file $expect_file exists");
mysqld_start($mysqld, $mysqld->{'start_opts'},
$mysqld->{'start_slave_master_info'});
unlink($expect_file);
}
return;
}
}
# FIXME the man page says to wait for -1 to terminate,
# but on OS X we get '0' all the time...
while ( ($ret_pid= waitpid(-1,&WNOHANG)) > 0 )
foreach my $cluster (@{$::clusters})
{
mtr_debug("waitpid() catched exit of child $ret_pid");
foreach my $idx (0..1)
if ( $cluster->{'pid'} eq $ret_pid )
{
if ( $::master->[$idx]->{'pid'} eq $ret_pid )
mtr_verbose("$cluster->{'name'} cluster ndb_mgmd exited, pid: $ret_pid");
$cluster->{'pid'}= 0;
# Check if crash expected and restart if it was
my $expect_file= "$::opt_vardir/tmp/ndb_mgmd_" . "$cluster->{'type'}" .
".expect";
if ( -f $expect_file )
{
mtr_debug("child $ret_pid was master[$idx]");
$::master->[$idx]->{'pid'}= 0;
mtr_verbose("Crash was expected, file $expect_file exists");
ndbmgmd_start($cluster);
unlink($expect_file);
}
return;
}
foreach my $idx (0..2)
foreach my $ndbd (@{$cluster->{'ndbds'}})
{
if ( $::slave->[$idx]->{'pid'} eq $ret_pid )
if ( $ndbd->{'pid'} eq $ret_pid )
{
mtr_debug("child $ret_pid was slave[$idx]");
$::slave->[$idx]->{'pid'}= 0;
last;
mtr_verbose("$cluster->{'name'} cluster ndbd exited, pid: $ret_pid");
$ndbd->{'pid'}= 0;
# Check if crash expected and restart if it was
my $expect_file= "$::opt_vardir/tmp/ndbd_" . "$cluster->{'type'}" .
"$ndbd->{'idx'}" . ".expect";
if ( -f $expect_file )
{
mtr_verbose("Crash was expected, file $expect_file exists");
ndbd_start($cluster, $ndbd->{'idx'},
$ndbd->{'start_extra_args'});
unlink($expect_file);
}
return;
}
}
}
mtr_warning("check_expected_crash_and_restart couldn't find an entry for pid: $ret_pid");
}
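# Editor's sketch (made-up paths, not from this commit): the ".expect"
# marker convention that check_expected_crash_and_restart() above keys
# on. A test that crashes a server on purpose first creates
# <vardir>/tmp/<type><idx>.expect; when the framework later reaps that
# server's pid it finds the marker, restarts the server and removes the
# file instead of aborting the run.
use strict;
use warnings;
use File::Temp qw(tempdir);
my $example_vardir= tempdir(CLEANUP => 1);
mkdir "$example_vardir/tmp" or die "mkdir failed: $!";
my $expect_file= "$example_vardir/tmp/master0.expect";
open(my $marker, ">", $expect_file) or die "can't create $expect_file: $!";
close $marker;
if ( -f $expect_file )
{
  print "crash was expected, a real run would restart mysqld here\n";
  unlink($expect_file) or warn "can't remove $expect_file: $!";
}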
##############################################################################
#
# The operating system will keep information about dead children,
# we read this information here, and if we have records the process
# is alive, we mark it as dead.
#
##############################################################################
sub mtr_record_dead_children () {
my $process_died= 0;
my $ret_pid;
# Wait without blocking to see if any processes have died
# -1 or 0 means there are no more processes to wait for
while ( ($ret_pid= waitpid(-1,&WNOHANG)) != 0 and $ret_pid != -1)
{
mtr_warning("mtr_record_dead_children: $ret_pid");
mark_process_dead($ret_pid);
$process_died= 1;
}
return $process_died;
}
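# Editor's sketch (standalone, not from this commit): the non-blocking
# reap loop used above. waitpid(-1, WNOHANG) returns a child's pid when
# one has exited, 0 when children exist but none has exited yet, and -1
# when there are no children left to wait for.
use strict;
use warnings;
use POSIX 'WNOHANG';
my $child= fork();
die "fork failed: $!" unless defined $child;
exit 0 if $child == 0;     # child exits immediately
sleep 1;                   # give it time to exit
my $reaped;
while ( ($reaped= waitpid(-1, &WNOHANG)) != 0 and $reaped != -1 )
{
  print "reaped child $reaped\n";
}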
sub start_reap_all {
......@@ -870,16 +978,24 @@ sub start_reap_all {
# here. If a process terminated before setting $SIG{CHLD} (but after
# any attempt to waitpid() it), it will still be a zombie. So we
# have to handle any such process here.
while(waitpid(-1, &WNOHANG) > 0) { };
my $pid;
while(($pid= waitpid(-1, &WNOHANG)) != 0 and $pid != -1)
{
mtr_warning("start_reap_all pid: $pid");
mark_process_dead($pid);
};
}
sub stop_reap_all {
$SIG{CHLD}= 'DEFAULT';
}
sub mtr_ping_mysqld_server ($) {
sub mtr_ping_port ($) {
my $port= shift;
mtr_verbose("mtr_ping_port: $port");
my $remote= "localhost";
my $iaddr= inet_aton($remote);
if ( ! $iaddr )
......@@ -897,13 +1013,13 @@ sub mtr_ping_mysqld_server ($) {
if ( connect(SOCK, $paddr) )
{
mtr_debug("Server (port: $port) is alive.");
close(SOCK); # FIXME check error?
mtr_verbose("USED");
return 1;
}
else
{
mtr_debug("Server (port: $port) is dead.");
mtr_verbose("FREE");
return 0;
}
}
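# Editor's sketch (example host and port, not from this commit): a
# self-contained TCP "ping" built from the same Socket calls as
# mtr_ping_port above; a successful connect() means something is
# already listening on the port.
use strict;
use warnings;
use Socket;
my ($example_host, $example_port)= ('localhost', 3306);
my $iaddr= inet_aton($example_host) or die "no such host: $example_host";
my $paddr= sockaddr_in($example_port, $iaddr);
socket(my $sock, PF_INET, SOCK_STREAM, getprotobyname('tcp'))
  or die "socket: $!";
if ( connect($sock, $paddr) )
{
  print "port $example_port is in use\n";
  close($sock);
}
else
{
  print "port $example_port is free\n";
}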
......@@ -933,15 +1049,15 @@ sub sleep_until_file_created ($$$) {
# Check if it died after the fork() was successful
if ( $pid != 0 && waitpid($pid,&WNOHANG) == $pid )
{
mtr_warning("Process $pid died");
return 0;
}
mtr_debug("Sleep $sleeptime milliseconds waiting for ".
"creation of $pidfile");
mtr_debug("Sleep $sleeptime milliseconds waiting for $pidfile");
# Print extra message every 60 seconds
my $seconds= ($loop * $sleeptime) / 1000;
if ( $seconds > 1 and $seconds % 60 == 0 )
if ( $seconds > 1 and int($seconds) % 60 == 0 )
{
my $left= $timeout - $seconds;
mtr_warning("Waited $seconds seconds for $pidfile to be created, " .
......@@ -959,20 +1075,18 @@ sub sleep_until_file_created ($$$) {
sub mtr_kill_processes ($) {
my $pids = shift;
foreach my $sig (15, 9)
mtr_verbose("mtr_kill_processes " . join(" ", @$pids));
foreach my $pid (@$pids)
{
my $retries= 10;
while (1)
foreach my $sig (15, 9)
{
kill($sig, @{$pids});
last unless kill (0, @{$pids}) and $retries--;
mtr_debug("Sleep 2 second waiting for processes to die");
sleep(2);
last if mtr_im_kill_process([ $pid ], $sig, 10, 1);
}
}
}
##############################################################################
#
# When we exit, we kill off all children
......@@ -982,7 +1096,7 @@ sub mtr_kill_processes ($) {
# FIXME something is wrong, we sometimes terminate with "Hangup" written
# to tty, and no STDERR output telling us why.
# FIXME for some readon, setting HUP to 'IGNORE' will cause exit() to
# FIXME for some reason, setting HUP to 'IGNORE' will cause exit() to
# write out "Hangup", and maybe loose some output. We insert a sleep...
sub mtr_exit ($) {
......@@ -997,679 +1111,9 @@ sub mtr_exit ($) {
# set ourselves as the group leader at startup (with
# POSIX::setpgrp(0,0)), but then care must be needed to always do
# proper child process cleanup.
kill('HUP', -$$) if $$ == getpgrp();
kill('HUP', -$$) if !$::glob_win32_perl and $$ == getpgrp();
sleep 2;
exit($code);
}
##############################################################################
#
# Instance Manager management routines.
#
##############################################################################
sub mtr_im_kill_process ($$$$) {
my $pid_lst= shift;
my $signal= shift;
my $total_retries= shift;
my $timeout= shift;
my %pids;
foreach my $pid (@{$pid_lst})
{
$pids{$pid}= 1;
}
for (my $cur_attempt= 1; $cur_attempt <= $total_retries; ++$cur_attempt)
{
foreach my $pid (keys %pids)
{
mtr_debug("Sending $signal to $pid...");
kill($signal, $pid);
unless (kill (0, $pid))
{
mtr_debug("Process $pid died.");
delete $pids{$pid};
}
}
return if scalar keys %pids == 0;
mtr_debug("Sleeping $timeout second(s) waiting for processes to die...");
sleep($timeout);
}
mtr_debug("Process(es) " .
join(' ', keys %pids) .
" is still alive after $total_retries " .
"of sending signal $signal.");
}
###########################################################################
sub mtr_im_load_pids($) {
my $instance_manager= shift;
mtr_debug("Loading PID files...");
# Obtain mysqld-process pids.
my $instances = $instance_manager->{'instances'};
for (my $idx= 0; $idx < 2; ++$idx)
{
mtr_debug("IM-guarded mysqld[$idx] PID file: '" .
$instances->[$idx]->{'path_pid'} . "'.");
my $mysqld_pid;
if (-r $instances->[$idx]->{'path_pid'})
{
$mysqld_pid= mtr_get_pid_from_file($instances->[$idx]->{'path_pid'});
mtr_debug("IM-guarded mysqld[$idx] PID: $mysqld_pid.");
}
else
{
$mysqld_pid= undef;
mtr_debug("IM-guarded mysqld[$idx]: no PID file.");
}
$instances->[$idx]->{'pid'}= $mysqld_pid;
}
# Re-read Instance Manager PIDs from the file, since during tests Instance
# Manager could have been restarted, so its PIDs could have been changed.
# - IM-main
mtr_debug("IM-main PID file: '$instance_manager->{path_pid}'.");
if (-f $instance_manager->{'path_pid'})
{
$instance_manager->{'pid'} =
mtr_get_pid_from_file($instance_manager->{'path_pid'});
mtr_debug("IM-main PID: $instance_manager->{pid}.");
}
else
{
mtr_debug("IM-main: no PID file.");
$instance_manager->{'pid'}= undef;
}
# - IM-angel
mtr_debug("IM-angel PID file: '$instance_manager->{path_angel_pid}'.");
if (-f $instance_manager->{'path_angel_pid'})
{
$instance_manager->{'angel_pid'} =
mtr_get_pid_from_file($instance_manager->{'path_angel_pid'});
mtr_debug("IM-angel PID: $instance_manager->{'angel_pid'}.");
}
else
{
mtr_debug("IM-angel: no PID file.");
$instance_manager->{'angel_pid'} = undef;
}
}
###########################################################################
sub mtr_im_terminate($) {
my $instance_manager= shift;
# Load pids from pid-files. We should do it first of all, because IM deletes
# them on shutdown.
mtr_im_load_pids($instance_manager);
mtr_debug("Shutting Instance Manager down...");
# Ignoring SIGCHLD so that all children could rest in peace.
start_reap_all();
# Send SIGTERM to IM-main.
if (defined $instance_manager->{'pid'})
{
mtr_debug("IM-main pid: $instance_manager->{pid}.");
mtr_debug("Stopping IM-main...");
mtr_im_kill_process([ $instance_manager->{'pid'} ], 'TERM', 10, 1);
}
else
{
mtr_debug("IM-main pid: n/a.");
}
# If IM-angel was alive, wait for it to die.
if (defined $instance_manager->{'angel_pid'})
{
mtr_debug("IM-angel pid: $instance_manager->{'angel_pid'}.");
mtr_debug("Waiting for IM-angel to die...");
my $total_attempts= 10;
for (my $cur_attempt=1; $cur_attempt <= $total_attempts; ++$cur_attempt)
{
unless (kill (0, $instance_manager->{'angel_pid'}))
{
mtr_debug("IM-angel died.");
last;
}
sleep(1);
}
}
else
{
mtr_debug("IM-angel pid: n/a.");
}
stop_reap_all();
# Re-load PIDs.
mtr_im_load_pids($instance_manager);
}
###########################################################################
sub mtr_im_check_alive($) {
my $instance_manager= shift;
mtr_debug("Checking whether IM-components are alive...");
return 1 if mtr_im_check_main_alive($instance_manager);
return 1 if mtr_im_check_angel_alive($instance_manager);
return 1 if mtr_im_check_mysqlds_alive($instance_manager);
return 0;
}
###########################################################################
sub mtr_im_check_main_alive($) {
my $instance_manager= shift;
# Check that the process, that we know to be IM's, is dead.
if (defined $instance_manager->{'pid'})
{
if (kill (0, $instance_manager->{'pid'}))
{
mtr_debug("IM-main (PID: $instance_manager->{pid}) is alive.");
return 1;
}
else
{
mtr_debug("IM-main (PID: $instance_manager->{pid}) is dead.");
}
}
else
{
mtr_debug("No PID file for IM-main.");
}
# Check that IM does not accept client connections.
if (mtr_ping_mysqld_server($instance_manager->{'port'}))
{
mtr_debug("IM-main (port: $instance_manager->{port}) " .
"is accepting connections.");
mtr_im_errlog("IM-main is accepting connections on port " .
"$instance_manager->{port}, but there is no " .
"process information.");
return 1;
}
else
{
mtr_debug("IM-main (port: $instance_manager->{port}) " .
"does not accept connections.");
return 0;
}
}
###########################################################################
sub mtr_im_check_angel_alive($) {
my $instance_manager= shift;
# Check that the process, that we know to be the Angel, is dead.
if (defined $instance_manager->{'angel_pid'})
{
if (kill (0, $instance_manager->{'angel_pid'}))
{
mtr_debug("IM-angel (PID: $instance_manager->{angel_pid}) is alive.");
return 1;
}
else
{
mtr_debug("IM-angel (PID: $instance_manager->{angel_pid}) is dead.");
return 0;
}
}
else
{
mtr_debug("No PID file for IM-angel.");
return 0;
}
}
###########################################################################
sub mtr_im_check_mysqlds_alive($) {
my $instance_manager= shift;
mtr_debug("Checking for IM-guarded mysqld instances...");
my $instances = $instance_manager->{'instances'};
for (my $idx= 0; $idx < 2; ++$idx)
{
mtr_debug("Checking mysqld[$idx]...");
return 1
if mtr_im_check_mysqld_alive($instance_manager, $instances->[$idx]);
}
  return 0;
}
###########################################################################
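# Return 1 if the given IM-guarded mysqld instance is still alive, i.e. its
# process answers signal 0 or something is still accepting connections on
# its port.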
sub mtr_im_check_mysqld_alive($$) {
my $instance_manager= shift;
my $mysqld_instance= shift;
  # Check whether the mysqld process is still alive.
  if (defined $mysqld_instance->{'pid'})
  {
    if (kill (0, $mysqld_instance->{'pid'}))
{
mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is alive.");
return 1;
}
else
{
mtr_debug("Mysqld instance (PID: $mysqld_instance->{pid}) is dead.");
}
}
else
{
mtr_debug("No PID file for mysqld instance.");
}
  # Check whether mysqld is still accepting client connections.
if (mtr_ping_mysqld_server($mysqld_instance->{'port'}))
{
mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " .
"is accepting connections.");
mtr_im_errlog("Mysqld is accepting connections on port " .
"$mysqld_instance->{port}, but there is no " .
"process information.");
return 1;
}
else
{
mtr_debug("Mysqld instance (port: $mysqld_instance->{port}) " .
"does not accept connections.");
return 0;
}
}
###########################################################################
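# Remove the PID and socket files left behind by IM-main, IM-angel and the
# two guarded mysqld instances.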
sub mtr_im_cleanup($) {
my $instance_manager= shift;
mtr_im_rm_file($instance_manager->{'path_pid'});
mtr_im_rm_file($instance_manager->{'path_sock'});
mtr_im_rm_file($instance_manager->{'path_angel_pid'});
for (my $idx= 0; $idx < 2; ++$idx)
{
mtr_im_rm_file($instance_manager->{'instances'}->[$idx]->{'path_pid'});
mtr_im_rm_file($instance_manager->{'instances'}->[$idx]->{'path_sock'});
}
}
###########################################################################
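# Remove a single file if it exists; warn if the removal fails.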
sub mtr_im_rm_file($)
{
my $file_path= shift;
if (-f $file_path)
{
mtr_debug("Removing '$file_path'...");
mtr_warning("Can not remove '$file_path'.")
unless unlink($file_path);
}
else
{
mtr_debug("File '$file_path' does not exist already.");
}
}
###########################################################################
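# Report an IM-related warning. Until BUG#20761 is fixed it is printed to
# stdout (see the TODO below) so that it shows up on the console.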
sub mtr_im_errlog($) {
my $msg= shift;
# Complain in error log so that a warning will be shown.
#
  # TODO: until BUG#20761 is fixed, we print the warning to stdout so that
  # it can be seen on the console and does not produce a pushbuild error.
# my $errlog= "$opt_vardir/log/mysql-test-run.pl.err";
#
# open (ERRLOG, ">>$errlog") ||
# mtr_error("Can not open error log ($errlog)");
#
# my $ts= localtime();
# print ERRLOG
# "Warning: [$ts] $msg\n";
#
# close ERRLOG;
my $ts= localtime();
print "Warning: [$ts] $msg\n";
}
###########################################################################
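# Forcibly kill all IM-related processes with SIGKILL: IM-angel first, then
# IM-main, then the guarded mysqld instances. The PID files are reloaded
# between the steps, since some processes may have been restarted.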
sub mtr_im_kill($) {
my $instance_manager= shift;
# Re-load PIDs. That can be useful because some processes could have been
# restarted.
mtr_im_load_pids($instance_manager);
# Ignoring SIGCHLD so that all children could rest in peace.
start_reap_all();
# Kill IM-angel first of all.
if (defined $instance_manager->{'angel_pid'})
{
mtr_debug("Killing IM-angel (PID: $instance_manager->{angel_pid})...");
    mtr_im_kill_process([ $instance_manager->{'angel_pid'} ], 'KILL', 10, 1);
}
else
{
mtr_debug("IM-angel is dead.");
}
# Re-load PIDs again.
mtr_im_load_pids($instance_manager);
# Kill IM-main.
if (defined $instance_manager->{'pid'})
{
mtr_debug("Killing IM-main (PID: $instance_manager->pid})...");
mtr_im_kill_process([ $instance_manager->{'pid'} ], 'KILL', 10, 1);
}
else
{
mtr_debug("IM-main is dead.");
}
# Re-load PIDs again.
mtr_im_load_pids($instance_manager);
# Kill guarded mysqld instances.
my @mysqld_pids;
mtr_debug("Collecting PIDs of mysqld instances to kill...");
for (my $idx= 0; $idx < 2; ++$idx)
{
my $pid= $instance_manager->{'instances'}->[$idx]->{'pid'};
next unless defined $pid;
mtr_debug(" - IM-guarded mysqld[$idx] PID: $pid.");
push (@mysqld_pids, $pid);
}
if (scalar @mysqld_pids > 0)
{
mtr_debug("Killing IM-guarded mysqld instances...");
mtr_im_kill_process(\@mysqld_pids, 'KILL', 10, 1);
}
# That's all.
stop_reap_all();
}
##############################################################################
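# Wait until Instance Manager accepts connections on its port, making up to
# $total_attempts attempts and sleeping $connect_timeout seconds between
# them. Return 1 on success, 0 otherwise.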
sub mtr_im_wait_for_connection($$$) {
my $instance_manager= shift;
my $total_attempts= shift;
my $connect_timeout= shift;
mtr_debug("Waiting for IM on port $instance_manager->{port} " .
"to start accepting connections...");
for (my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt)
{
mtr_debug("Trying to connect to IM ($cur_attempt of $total_attempts)...");
if (mtr_ping_mysqld_server($instance_manager->{'port'}))
{
mtr_debug("IM is accepting connections " .
"on port $instance_manager->{port}.");
return 1;
}
mtr_debug("Sleeping $connect_timeout...");
sleep($connect_timeout);
}
mtr_debug("IM does not accept connections " .
"on port $instance_manager->{port} after " .
($total_attempts * $connect_timeout) . " seconds.");
return 0;
}
##############################################################################
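# Wait until an IM-guarded mysqld instance accepts connections on its port,
# making up to $total_attempts attempts and sleeping $connect_timeout
# seconds between them. Return 1 on success, 0 otherwise.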
sub mtr_im_wait_for_mysqld($$$) {
my $mysqld= shift;
my $total_attempts= shift;
my $connect_timeout= shift;
mtr_debug("Waiting for IM-guarded mysqld on port $mysqld->{port} " .
"to start accepting connections...");
for (my $cur_attempt= 1; $cur_attempt <= $total_attempts; ++$cur_attempt)
{
mtr_debug("Trying to connect to mysqld " .
"($cur_attempt of $total_attempts)...");
if (mtr_ping_mysqld_server($mysqld->{'port'}))
{
mtr_debug("Mysqld is accepting connections " .
"on port $mysqld->{port}.");
return 1;
}
mtr_debug("Sleeping $connect_timeout...");
sleep($connect_timeout);
}
mtr_debug("Mysqld does not accept connections " .
"on port $mysqld->{port} after " .
($total_attempts * $connect_timeout) . " seconds.");
return 0;
}
##############################################################################
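# Start Instance Manager with the given extra command line options, obtain
# its real PID from the PID file (IM may run in daemon mode), and wait
# until IM and all guarded mysqld instances accept connections. Aborts the
# test suite if IM or a guarded instance never becomes reachable.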
sub mtr_im_start($$) {
my $instance_manager = shift;
my $opts = shift;
mtr_debug("Starting Instance Manager...");
my $args;
mtr_init_args(\$args);
mtr_add_arg($args, "--defaults-file=%s",
$instance_manager->{'defaults_file'});
foreach my $opt (@{$opts})
{
mtr_add_arg($args, $opt);
}
$instance_manager->{'pid'} =
mtr_spawn(
$::exe_im, # path to the executable
$args, # cmd-line args
'', # stdin
$instance_manager->{'path_log'}, # stdout
$instance_manager->{'path_err'}, # stderr
'', # pid file path (not used)
{ append_log_file => 1 } # append log files
);
if ( ! $instance_manager->{'pid'} )
{
mtr_report('Could not start Instance Manager');
return;
}
  # Instance Manager can be run in daemon mode. In this case it creates
  # several processes, and the parent process created by mtr_spawn() exits
  # just after startup. So we have to obtain the Instance Manager PID from
  # the PID file.
if ( ! sleep_until_file_created(
$instance_manager->{'path_pid'},
$instance_manager->{'start_timeout'},
-1)) # real PID is still unknown
{
mtr_report("Instance Manager PID file is missing");
return;
}
$instance_manager->{'pid'} =
mtr_get_pid_from_file($instance_manager->{'path_pid'});
mtr_debug("Instance Manager started. PID: $instance_manager->{pid}.");
# Wait until we can connect to IM.
my $IM_CONNECT_TIMEOUT= 30;
unless (mtr_im_wait_for_connection($instance_manager,
$IM_CONNECT_TIMEOUT, 1))
{
mtr_debug("Can not connect to Instance Manager " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
mtr_debug("Aborting test suite...");
mtr_kill_leftovers();
mtr_error("Can not connect to Instance Manager " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
}
# Wait until we can connect to guarded mysqld-instances
# (in other words -- wait for IM to start guarded instances).
for (my $idx= 0; $idx < 2; ++$idx)
{
my $mysqld= $instance_manager->{'instances'}->[$idx];
next if exists $mysqld->{'nonguarded'};
mtr_debug("Waiting for mysqld[$idx] to start...");
    unless (mtr_im_wait_for_mysqld($mysqld, $IM_CONNECT_TIMEOUT, 1))
{
mtr_debug("Can not connect to mysqld[$idx] " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
mtr_debug("Aborting test suite...");
mtr_kill_leftovers();
mtr_error("Can not connect to mysqld[$idx] " .
"in $IM_CONNECT_TIMEOUT seconds after start.");
}
mtr_debug("mysqld[$idx] started.");
}
mtr_debug("Instance Manager started.");
}
##############################################################################
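# Stop Instance Manager: first try a graceful shutdown, and if some
# IM-related process survives, kill everything. Clean up PID and socket
# files afterwards. Return 1 on success.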
sub mtr_im_stop($) {
my $instance_manager= shift;
mtr_debug("Stopping Instance Manager...");
# Try graceful shutdown.
mtr_im_terminate($instance_manager);
# Check that all processes died.
unless (mtr_im_check_alive($instance_manager))
{
mtr_debug("Instance Manager has been stopped successfully.");
mtr_im_cleanup($instance_manager);
return 1;
}
  # Instance Manager does not want to die, so we have to kill it.
  mtr_im_errlog("Instance Manager did not shut down gracefully.");
mtr_im_kill($instance_manager);
# Check again that all IM-related processes have been killed.
my $im_is_alive= mtr_im_check_alive($instance_manager);
mtr_im_cleanup($instance_manager);
if ($im_is_alive)
{
mtr_error("Can not kill Instance Manager or its children.");
return 0;
}
mtr_debug("Instance Manager has been killed successfully.");
return 1;
}
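# A minimal usage sketch (hypothetical call sequence; in mysql-test-run.pl
# the $instance_manager hash is populated elsewhere with the keys used
# above, and extra IM command line options, if any, go in the second
# argument to mtr_im_start):
#
#   mtr_im_start($instance_manager, []);
#   # ... run the IM test cases ...
#   mtr_im_stop($instance_manager)
#     or mtr_error("Failed to stop Instance Manager");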
###########################################################################
......
......@@ -10,6 +10,7 @@ sub mtr_report_test_name($);
sub mtr_report_test_passed($);
sub mtr_report_test_failed($);
sub mtr_report_test_skipped($);
sub mtr_report_tests_not_skipped_though_disabled($);
sub mtr_show_failed_diff ($);
sub mtr_report_stats ($);
......@@ -21,6 +22,7 @@ sub mtr_warning (@);
sub mtr_error (@);
sub mtr_child_error (@);
sub mtr_debug (@);
sub mtr_verbose (@);
##############################################################################
......@@ -96,7 +98,24 @@ sub mtr_report_test_skipped ($) {
}
else
{
print "[ skipped ]\n";
print "[ skipped ] $tinfo->{'comment'}\n";
}
}
sub mtr_report_tests_not_skipped_though_disabled ($) {
my $tests= shift;
if ( $::opt_enable_disabled )
{
my @disabled_tests= grep {$_->{'dont_skip_though_disabled'}} @$tests;
if ( @disabled_tests )
{
print "\nTest(s) which will be run though they are marked as disabled:\n";
foreach my $tinfo ( sort {$a->{'name'} cmp $b->{'name'}} @disabled_tests )
{
printf " %-20s : %s\n", $tinfo->{'name'}, $tinfo->{'comment'};
}
}
}
}
......@@ -107,7 +126,7 @@ sub mtr_report_test_passed ($) {
if ( $::opt_timer and -f "$::opt_vardir/log/timer" )
{
$timer= mtr_fromfile("$::opt_vardir/log/timer");
$::glob_tot_real_time += $timer;
$::glob_tot_real_time += ($timer/1000);
$timer= sprintf "%12s", $timer;
}
$tinfo->{'result'}= 'MTR_RES_PASSED';
......@@ -122,7 +141,7 @@ sub mtr_report_test_failed ($) {
{
print "[ fail ] timeout\n";
}
elsif ( $tinfo->{'ndb_test'} and !$::flag_ndb_status_ok)
elsif ( $tinfo->{'ndb_test'} and $::cluster->[0]->{'installed_ok'} eq "NO")
{
print "[ fail ] ndbcluster start failure\n";
return;
......@@ -157,6 +176,7 @@ sub mtr_report_stats ($) {
my $tot_passed= 0;
my $tot_failed= 0;
my $tot_tests= 0;
my $tot_restarts= 0;
my $found_problems= 0; # Some warnings are errors...
foreach my $tinfo (@$tests)
......@@ -175,6 +195,10 @@ sub mtr_report_stats ($) {
$tot_tests++;
$tot_failed++;
}
if ( $tinfo->{'restarted'} )
{
$tot_restarts++;
}
}
# ----------------------------------------------------------------------
......@@ -197,6 +221,14 @@ sub mtr_report_stats ($) {
"the documentation at\n",
"http://www.mysql.com/doc/en/MySQL_test_suite.html\n";
}
print
"The servers were restarted $tot_restarts times\n";
if ( $::opt_timer )
{
print
"Spent $::glob_tot_real_time seconds actually executing testcases\n"
}
# ----------------------------------------------------------------------
# If a debug run, there might be interesting information inside
......@@ -216,7 +248,10 @@ sub mtr_report_stats ($) {
else
{
# We report different types of problems in order
foreach my $pattern ( "^Warning:", "^Error:", "^==.* at 0x" )
foreach my $pattern ( "^Warning:", "^Error:", "^==.* at 0x",
"InnoDB: Warning", "missing DBUG_RETURN",
"mysqld: Warning",
"Attempting backtrace", "Assertion .* failed" )
{
foreach my $errlog ( sort glob("$::opt_vardir/log/*.err") )
{
......@@ -230,7 +265,8 @@ sub mtr_report_stats ($) {
# Skip some non fatal warnings from the log files
if ( /Warning:\s+Table:.* on (delete|rename)/ or
/Warning:\s+Setting lower_case_table_names=2/ or
/Warning:\s+One can only use the --user.*root/ )
/Warning:\s+One can only use the --user.*root/ or
/InnoDB: Warning: we did not need to do crash recovery/)
{
next; # Skip these lines
}
......@@ -241,6 +277,7 @@ sub mtr_report_stats ($) {
}
}
}
}
if ( $found_problems )
{
mtr_warning("Got errors/warnings while running tests, please examine",
......@@ -248,7 +285,6 @@ sub mtr_report_stats ($) {
}
}
}
}
print "\n";
......@@ -331,5 +367,11 @@ sub mtr_debug (@) {
print STDERR "####: ",join(" ", @_),"\n";
}
}
sub mtr_verbose (@) {
if ( $::opt_verbose )
{
print STDERR "> ",join(" ", @_),"\n";
}
}
1;
......@@ -21,15 +21,13 @@ sub run_stress_test ()
{
my $args;
my $stress_basedir;
my $stress_suitedir;
mtr_report("Starting stress testing\n");
if ( ! $::glob_use_embedded_server and ! $::opt_local_master )
if ( ! $::glob_use_embedded_server )
{
$::master->[0]->{'pid'}= mysqld_start('master',0,[],[],0);
if ( ! $::master->[0]->{'pid'} )
if ( ! mysqld_start($::master->[0],[],[]) )
{
mtr_error("Can't start the mysqld server");
}
......