Commit 83aeee64 authored by unknown

Merge work:/home/bk/mysql-4.0
into mysql.sashanet.com:/home/sasha/src/bk/mysql-4.0

parents 1c809563 cc9076f1
@c FIX AGL 20011108 Extracted from manual.texi.
@c Should only be on website with new submits by webform.
@node MySQL Testimonials, Contrib, Users, Top
@appendix MySQL Testimonials
@cindex MySQL Testimonials
The section 'MySQL Users' contains many links to MySQL users but does
not provide much information about how they are using MySQL.
@xref{Users}. This section gives you an idea of how other MySQL users
are using MySQL to solve their problems.
Please note that all new stories are added on the MySQL website,
@uref{http://www.mysql.com/}.
Do let us know about @emph{your} success story too!
@itemize @bullet
@item
@strong{Peter Zaitsev of Spylog.ru} writes:
I think you might be interested in my database size. The whole database
currently runs on 15 servers, and I think it holds about 60,000 tables
containing about 5,000,000,000 rows. My most heavily loaded server
currently holds about 10,000 tables with 1,000,000,000 rows in them.
The largest tables have about 50,000,000 rows, and this value will rise
as soon as I move to the 2.4 kernel with large file support. Currently
I have to delete many of the logs for large sites to keep table sizes
within 2GB.
@@ -4,6 +4,8 @@ insert into t1 (bandID,payoutID) VALUES (1,6),(2,6),(3,4),(4,9),(5,10),(6,1),(7,
 create table t2 (payoutID SMALLINT UNSIGNED NOT NULL PRIMARY KEY);
 insert into t2 (payoutID) SELECT DISTINCT payoutID FROM t1;
 insert into t2 (payoutID) SELECT payoutID+10 FROM t1;
+Duplicate entry '16' for key 1
+insert ignore into t2 (payoutID) SELECT payoutID+10 FROM t1;
 select * from t2;
 payoutID
 1
@@ -102,3 +102,13 @@ select status from t1;
 status
 1
 drop table t1;
+create table t1 (a int not null, b int not null);
+insert into t1 values (1,1),(1,2),(1,3);
+update t1 set b=4 where a=1 order by b asc limit 1;
+update t1 set b=4 where a=1 order by b desc limit 1;
+select * from t1;
+a	b
+1	4
+1	4
+1	2
+drop table t1;
@@ -7,7 +7,9 @@ create table t1 (bandID MEDIUMINT UNSIGNED NOT NULL PRIMARY KEY, payoutID SMALLI
 insert into t1 (bandID,payoutID) VALUES (1,6),(2,6),(3,4),(4,9),(5,10),(6,1),(7,12),(8,12);
 create table t2 (payoutID SMALLINT UNSIGNED NOT NULL PRIMARY KEY);
 insert into t2 (payoutID) SELECT DISTINCT payoutID FROM t1;
+--error 1062
 insert into t2 (payoutID) SELECT payoutID+10 FROM t1;
+insert ignore into t2 (payoutID) SELECT payoutID+10 FROM t1;
 select * from t2;
 drop table t1,t2;
 #
@@ -76,3 +76,14 @@ alter table t1 change lfdnr lfdnr int(10) unsigned default 0 not null auto_incre
 update t1 set status=1 where type='Open';
 select status from t1;
 drop table t1;
+
+#
+# Test of ORDER BY
+#
+
+create table t1 (a int not null, b int not null);
+insert into t1 values (1,1),(1,2),(1,3);
+update t1 set b=4 where a=1 order by b asc limit 1;
+update t1 set b=4 where a=1 order by b desc limit 1;
+select * from t1;
+drop table t1;
@@ -112,6 +112,10 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
   dirp = opendir(directory_file_name(tmp_path,(my_string) path));
   size = STARTSIZE;
+#if defined(__amiga__)
+  if ((dirp->dd_fd) < 0)	/* Directory doesn't exist */
+    goto error;
+#endif
   if (dirp == NULL || ! (buffer = (char *) my_malloc(size, MyFlags)))
     goto error;
Running crash-me on InterBase:

I
- got the open source version of InterBase 6.0.1
  (both modes, classic and superserver),
- set up DBD::InterBase from CPAN,
- created a database "test" and set sql_dialect for that database to 3,
- executed crash-me for both InterBase models (classic and superserver);
  a minimal connection sketch is shown below.
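A rough sketch of such a DBD::InterBase setup, assuming a local
database file, SQL dialect 3, and the default SYSDBA credentials (the
path, credentials, and sanity-check query are illustrative assumptions,
not details from the actual run):

#!/usr/bin/perl
# Sketch only: connect to the "test" database with SQL dialect 3.
# Database path and credentials are assumptions for illustration.
use strict;
use DBI;

my $dbh = DBI->connect(
    "DBI:InterBase:database=/var/db/test.gdb;ib_dialect=3",
    "sysdba", "masterkey",
    { RaiseError => 1, AutoCommit => 1 }
) or die $DBI::errstr;

# Simple sanity check against a system table before running crash-me.
my ($tables) = $dbh->selectrow_array(
    'select count(*) from rdb$relations');
print "connected, $tables relations visible\n";
$dbh->disconnect;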
There were some problems during the execution:

1) Sometimes the client side got a SIGSEGV. At that moment the server
   side writes
       gds__alloc: non-positive size allocation request
   to the log file.
   This problem occurs with both models. I am not sure whether it is an
   InterBase or a DBD::InterBase problem (though DBD::InterBase passed
   all necessary tests during installation without any problem).
2) In "superserver" mode, ibserver died several times (and ibguard
   restarted it).
@@ -2524,21 +2524,21 @@ sub report_trans
     if ($rc) {
       $dbh->{AutoCommit} = 1;
       if (safe_query_result($check,"","")) {
-	save_config_data($limit,"yes",$prompt);
+	save_config_data($limit,"yes",$limit);
       }
       safe_query($clear);
     } else {
       $dbh->{AutoCommit} = 1;
-      save_config_data($limit,"error",$prompt);
+      save_config_data($limit,"error",$limit);
     }
   } else {
-    save_config_data($limit,"error",$prompt);
+    save_config_data($limit,"error",$limit);
   }
   $dbh->{AutoCommit} = 1;
 }
 else
 {
-  save_config_data($limit,"no",$prompt);
+  save_config_data($limit,"no",$limit);
 }
 safe_query($clear);
@@ -121,6 +121,7 @@ sub new
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'vacuum'} = 1; # When using with --fast
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $limits{'NEG'} = 1; # Supports -id
   $limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int;
@@ -370,6 +371,7 @@ sub new
   $self->{'limits'} = \%limits;
   $self->{'double_quotes'} = 0;
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 0; # No transactions
   $self->{'blob'} = "text(" . $limits{'max_text_size'} .")";
   $self->{'text'} = "text(" . $limits{'max_text_size'} .")";
@@ -565,6 +567,7 @@ sub new
   $self->{'text'} = "text";
   $self->{'double_quotes'} = 1;
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $self->{"vacuum"} = 1;
   $limits{'join_optimizer'} = 1; # Can optimize FROM tables
   $limits{'load_data_infile'} = 0;
@@ -854,6 +857,7 @@ sub new
   $self->{'text'} = "long varchar";
   $self->{'double_quotes'} = 1;
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $limits{'max_conditions'} = 9999; # Probably big enough
   $limits{'max_columns'} = 2000; # From crash-me
@@ -1095,6 +1099,7 @@ sub new
   $self->{'text'} = "text";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $limits{'max_conditions'} = 1258;
   $limits{'max_columns'} = 226; # server is disconnecting????
@@ -1377,6 +1382,7 @@ sub new
   $self->{'text'} = "long";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $self->{"vacuum"} = 1;
   $limits{'max_conditions'} = 9999; # (Actually not a limit)
@@ -1633,6 +1639,7 @@ sub new
   $self->{'text'} = "byte in table";
   $self->{'double_quotes'} = 0; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $self->{'host'} = $host;
   $limits{'NEG'} = 1; # Supports -id
@@ -1834,6 +1841,7 @@ sub new
   $self->{'text'} = "blob"; # text ?
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $limits{'max_conditions'} = 97; # We get 'Query is too complex'
   $limits{'max_columns'} = 255; # Max number of columns in table
@@ -2010,6 +2018,7 @@ sub new
   $self->{'text'} = "text";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $limits{'max_conditions'} = 1030; # We get 'Query is too complex'
   $limits{'max_columns'} = 250; # Max number of columns in table
@@ -2197,6 +2206,7 @@ sub new
   $self->{'text'} = "text";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $self->{"vacuum"} = 1;
   $limits{'max_conditions'} = 1030; # We get 'Query is too complex'
@@ -2389,6 +2399,7 @@ sub new
   $self->{'text'} = "long";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $limits{'max_conditions'} = 50; # (Actually not a limit)
   $limits{'max_columns'} = 254; # Max number of columns in table
@@ -2603,6 +2614,7 @@ sub new
   $self->{'text'} = "varchar(255)";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $limits{'max_conditions'} = 418; # We get 'Query is too complex'
   $limits{'max_columns'} = 500; # Max number of columns in table
@@ -2772,6 +2784,7 @@ sub new
   $self->{'text'} = "character varying(15000)";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $self->{'char_null'} = "cast(NULL as char(1))";
   $self->{'numeric_null'} = "cast(NULL as int)";
@@ -2972,6 +2985,7 @@ sub new
   $self->{'text'} = "";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = "";
+  $self->{'transactions'} = 1; # Transactions enabled
   $self->{'char_null'} = "";
   $self->{'numeric_null'} = "";
@@ -3184,6 +3198,7 @@ sub new
   $self->{'text'} = "varchar(8000000)";
   $self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
   $self->{'drop_attr'} = ' restrict';
+  $self->{'transactions'} = 1; # Transactions enabled
   $self->{'error_on_execute_means_zero_rows'}=1;
   $limits{'max_conditions'} = 5427; # (Actually not a limit)
@@ -106,6 +106,12 @@ if (!$opt_skip_create)
 }
 else
 {
+  if ($opt_fast && $server->{transactions})
+  {
+    $dbh->{AutoCommit} = 0;
+    print "Transactions enabled\n" if ($opt_debug);
+  }
+
   for ($ti = 0; $ti <= $#table_names; $ti++)
   {
     my $table_name = $table_names[$ti];
@@ -128,8 +134,14 @@ if (!$opt_skip_create)
 	$row_count++;
       }
     }
+    if ($opt_fast && $server->{transactions})
+    {
+      $dbh->commit;
+      $dbh->{AutoCommit} = 1;
+    }
+
     close(DATA);
   }
 if ($opt_lock_tables)
 {
   $dbh->do("UNLOCK TABLES");
@@ -74,11 +74,25 @@ do_many($dbh,$server->create("bench",\@fields,\@index));
 print "Insert data into the table\n";
 
 $loop_time=new Benchmark;
 
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->{AutoCommit} = 0;
+  print "Transactions enabled\n" if ($opt_debug);
+}
+
 for ($i=0 ; $i < $opt_row_count ; $i++)
 {
   $query="insert into bench values ( " . ("$i," x ($opt_start_field_count-1)) . "$i)";
   $dbh->do($query) or die $DBI::errstr;
 }
 
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->commit;
+  $dbh->{AutoCommit} = 1;
+}
+
 $end_time=new Benchmark;
 print "Time for insert ($opt_row_count)",
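The same wrapper pattern recurs throughout the benchmark scripts in
this commit: when --fast is given and the server configuration reports
transaction support, AutoCommit is switched off around the bulk insert
and a single commit is issued at the end. A standalone sketch of the
idea, with a hypothetical DSN and table that are not taken from the
scripts:

#!/usr/bin/perl
# Sketch of the --fast transaction wrapper used by the benchmark
# scripts; DSN, credentials, and table are hypothetical placeholders.
use strict;
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "user", "password",
                       { RaiseError => 1, AutoCommit => 1 });

my $use_transactions = 1;  # stands in for $opt_fast && $server->{transactions}

$dbh->{AutoCommit} = 0 if $use_transactions;
for my $i (1 .. 1000)
{
  $dbh->do("insert into bench values ($i, $i)");
}
if ($use_transactions)
{
  $dbh->commit;            # one commit for the whole batch
  $dbh->{AutoCommit} = 1;  # restore per-statement commits
}

Batching all rows into one transaction avoids per-row commit overhead
on transactional servers, which is presumably why the wrapper is tied
to the --fast option.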
@@ -127,14 +127,28 @@ end_benchmark($start_time);
 sub test_query
 {
   my($test_text,$result_text,$query,$dbh,$count)=@_;
-  my($i,$loop_time,$end_time);
+  my($i,$loop_time,$end_time,$using_transactions);
   print $test_text . "\n";
   $loop_time=new Benchmark;
+  $using_transactions=0;
+  if ($opt_fast && $server->{transactions} && $query =~ /^insert /i)
+  {
+    $using_transactions=1;
+    $dbh->{AutoCommit} = 0;
+    print "Transactions enabled\n" if ($opt_debug);
+  }
   for ($i=0 ; $i < $count ; $i++)
   {
     defined(fetch_all_rows($dbh,$query)) or die $DBI::errstr;
   }
+  if ($using_transactions)
+  {
+    $dbh->commit;
+    $dbh->{AutoCommit} = 1;
+  }
   $end_time=new Benchmark;
   print $result_text . "($count): " .
 	timestr(timediff($end_time, $loop_time),"all") . "\n\n";
@@ -29,6 +29,7 @@
 use DBI;
 use Benchmark;
+use Data::Dumper;
 
 $opt_loop_count=100000;		# number of rows/3
 $small_loop_count=10;		# Loop for full table retrieval
@@ -132,6 +133,12 @@ else
   $query="insert into bench1 (id,id2,id3,dummy1) values ";
 }
 
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->{AutoCommit} = 0;
+  print "Transactions enabled\n" if ($opt_debug);
+}
+
 if (($opt_fast || $opt_fast_insert) && $server->{'limits'}->{'insert_multi_value'})
 {
   $query_size=$server->{'limits'}->{'query_size'};
@@ -209,6 +216,12 @@ else
   }
 }
 
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->commit;
+  $dbh->{AutoCommit} = 1;
+}
+
 $end_time=new Benchmark;
 print "Time for insert (" . ($total_rows) . "): " .
   timestr(timediff($end_time, $loop_time),"all") . "\n\n";
@@ -234,6 +247,12 @@ if ($limits->{'unique_index'})
 {
   print "Testing insert of duplicates\n";
   $loop_time=new Benchmark;
+
+  if ($opt_fast && $server->{transactions})
+  {
+    $dbh->{AutoCommit} = 0;
+  }
+
   for ($i=0 ; $i < $opt_loop_count ; $i++)
   {
     $tmpvar^= ((($tmpvar + 63) + $i)*3 % $opt_loop_count);
@@ -244,6 +263,11 @@ if ($limits->{'unique_index'})
       die "Didn't get an error when inserting duplicate record $tmp\n";
     }
   }
+  if ($opt_fast && $server->{transactions})
+  {
+    $dbh->commit;
+    $dbh->{AutoCommit} = 1;
+  }
   $end_time=new Benchmark;
   print "Time for insert_duplicates (" . ($opt_loop_count) . "): " .
@@ -577,7 +601,6 @@ if ($limits->{'group_functions'})
   }
   $sth->finish;
 
-  $count++;
   $sth=$dbh->prepare($query="select count(*),sum(id+0.0),min(id),max(id),avg(id-0.0) from bench1") or die $DBI::errstr;
   $sth->execute or die $DBI::errstr;
@@ -1250,6 +1273,11 @@ if ($server->small_rollback_segment())
 }
 $loop_time=new Benchmark;
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->{AutoCommit} = 0;
+}
+
 $fields=$#fields;
 if (($opt_fast || $opt_fast_insert) && $server->{'limits'}->{'insert_multi_value'})
 {
@@ -1297,6 +1325,13 @@ else
     $dbh->do($query) or die "Got error $DBI::errstr with query: $query\n";
   }
 }
+
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->commit;
+  $dbh->{AutoCommit} = 1;
+}
+
 $end_time=new Benchmark;
 print "Time for insert_key ($many_keys_loop_count): " .
   timestr(timediff($end_time, $loop_time),"all") . "\n\n";
@@ -1325,11 +1360,24 @@ if ($opt_fast && defined($server->{vacuum}))
 print "Testing update of keys\n";
 $loop_time=new Benchmark;
+
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->{AutoCommit} = 0;
+}
+
 for ($i=0 ; $i< 256; $i++)
 {
   $dbh->do("update bench1 set field5=1 where field_search=$i")
     or die "Got error $DBI::errstr with query: update bench1 set field5=1 where field_search=$i\n";
 }
+
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->commit;
+  $dbh->{AutoCommit} = 1;
+}
+
 $end_time=new Benchmark;
 print "Time for update_of_primary_key_many_keys (256): " .
   timestr(timediff($end_time, $loop_time),"all") . "\n\n";
@@ -1420,12 +1468,18 @@ if ($limits->{'insert_multi_value'})
 			  "dummy1 char(30)"],
 			 ["primary key (id,id2)",
 			  "index index_id3 (id3)"]));
-  $loop_time=new Benchmark;
   if ($opt_lock_tables)
   {
     $sth = $dbh->do("LOCK TABLES bench1 write") || die $DBI::errstr;
   }
+  if ($opt_fast && $server->{transactions})
+  {
+    $dbh->{AutoCommit} = 0;
+  }
+
+  $loop_time=new Benchmark;
   print "Inserting $opt_loop_count rows with multiple values\n";
 
   $query="insert into bench1 values ";
   $res=$query;
@@ -1448,6 +1502,11 @@ if ($limits->{'insert_multi_value'})
   {
     $sth = $dbh->do("UNLOCK TABLES ") || die $DBI::errstr;
   }
+  if ($opt_fast && $server->{transactions})
+  {
+    $dbh->commit;
+    $dbh->{AutoCommit} = 1;
+  }
   $end_time=new Benchmark;
   print "Time for multiple_value_insert (" . ($opt_loop_count) . "): " .
@@ -92,6 +92,12 @@ if ($opt_fast && defined($server->{vacuum}))
 print "Inserting $opt_loop_count rows\n";
 
 $loop_time=new Benchmark;
+
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->{AutoCommit} = 0;
+}
+
 $query="insert into bench1 values (";
 $half_done=$opt_loop_count/2;
 for ($id=0,$rev_id=$opt_loop_count-1 ; $id < $opt_loop_count ; $id++,$rev_id--)
@@ -105,6 +111,12 @@ for ($id=0,$rev_id=$opt_loop_count-1 ; $id < $opt_loop_count ; $id++,$rev_id--)
   }
 }
 
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->commit;
+  $dbh->{AutoCommit} = 1;
+}
+
 $end_time=new Benchmark;
 print "Time to insert ($opt_loop_count): " .
   timestr(timediff($end_time, $loop_time),"all") . "\n\n";
@@ -109,6 +109,11 @@ if ($opt_fast && $server->{'limits'}->{'load_data_infile'})
 }
 else
 {
+  if ($opt_fast && $server->{transactions})
+  {
+    $dbh->{AutoCommit} = 0;
+  }
+
   for ($ti = 0; $ti <= $#table_names; $ti++)
   {
     my $table_name = $table_names[$ti];
@@ -133,10 +138,17 @@ else
   }
   close(DATA);
 }
 if ($opt_lock_tables)
 {
   do_query($dbh,"UNLOCK TABLES");
 }
+
+if ($opt_fast && $server->{transactions})
+{
+  $dbh->commit;
+  $dbh->{AutoCommit} = 1;
+}
+
 $end_time=new Benchmark;
 print "Time to insert ($row_count): " .
   timestr(timediff($end_time, $loop_time),"all") . "\n";
@@ -1626,8 +1626,7 @@ mysql_execute_command(void)
     if (!(res=open_and_lock_tables(thd, tables)))
     {
       if ((result=new select_insert(tables->table,&lex->field_list,
-				    lex->sql_command == SQLCOM_REPLACE_SELECT ?
-				    DUP_REPLACE : DUP_IGNORE)))
+				    lex->duplicates)))
 	res=handle_select(thd,lex,result);
     }
     else
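With this change select_insert takes its duplicate-handling mode from
lex->duplicates as set by the parser (the rewritten REPLACE rule
further below sets DUP_REPLACE, and opt_ignore presumably selects
DUP_IGNORE), instead of hard-coding the choice here. The visible
effect matches the new test case above: plain INSERT ... SELECT stops
with error 1062 on a duplicate key, while INSERT IGNORE ... SELECT
skips the row. A hypothetical demonstration through DBI (connection
details and table names invented):

#!/usr/bin/perl
# Illustration of the duplicate handling exercised by the new test;
# DSN, credentials, and table names are invented.
use strict;
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "user", "password",
                       { RaiseError => 0, PrintError => 0 });

$dbh->do("create table t1 (id int not null primary key)");
$dbh->do("insert into t1 values (1),(2)");
$dbh->do("create table t2 (id int not null primary key)");
$dbh->do("insert into t2 values (2)");

# Plain INSERT ... SELECT now aborts on the duplicate key (error 1062):
$dbh->do("insert into t2 select id from t1")
  or print "insert failed: ", $dbh->errstr, "\n";

# INSERT IGNORE ... SELECT skips the duplicate row instead:
$dbh->do("insert ignore into t2 select id from t1")
  or die $dbh->errstr;

$dbh->do("drop table t1,t2");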
@@ -148,7 +148,7 @@ int mysql_update(THD *thd,
     used_key_is_modified=check_if_key_used(table, used_index, fields);
   else
     used_key_is_modified=0;
-  if (used_key_is_modified)
+  if (used_key_is_modified || order)
   {
     /*
     ** We can't update table directly; We must first search after all
@@ -2188,7 +2188,13 @@ insert:
 	INSERT { Lex->sql_command = SQLCOM_INSERT; } insert_lock_option opt_ignore insert2 insert_field_spec
 
 replace:
-	REPLACE { Lex->sql_command = SQLCOM_REPLACE; } replace_lock_option insert2 insert_field_spec
+	REPLACE
+	{
+	  LEX *lex=Lex;
+	  lex->sql_command = SQLCOM_REPLACE;
+	  lex->duplicates= DUP_REPLACE;
+	}
+	replace_lock_option insert2 insert_field_spec
 
 insert_lock_option:
 	/* empty */ { Lex->lock_option= TL_WRITE_CONCURRENT_INSERT; }
@@ -2299,10 +2305,6 @@ values:
 update:
 	UPDATE_SYM opt_low_priority opt_ignore table_name
-	SET update_list
-	where_clause
-	opt_order_clause
-	delete_limit_clause
 	{
 	  LEX *lex=Lex;
 	  lex->sql_command = SQLCOM_UPDATE;
@@ -2310,6 +2312,10 @@ update:
 	  lex->select->order_list.first=0;
 	  lex->select->order_list.next= (byte**) &lex->select->order_list.first;
 	}
+	SET update_list
+	where_clause
+	opt_order_clause
+	delete_limit_clause
 
 update_list:
 	update_list ',' simple_ident equal expr