Commit fdff9c0a authored by unknown

Merge mysql.sashanet.com:/home/sasha/src/bk/mysql

into mysql.sashanet.com:/home/sasha/src/bk/mysql-4.0


BitKeeper/etc/logging_ok:
  auto-union
Docs/manual.texi:
  Auto merged
mysql-test/r/innodb.result:
  Auto merged
mysys/tree.c:
  Auto merged
sql-bench/bench-init.pl.sh:
  Auto merged
sql-bench/server-cfg.sh:
  Auto merged
sql/mysqld.cc:
  Auto merged
parents 1da42e7b 2197533b
@@ -4,5 +4,7 @@ monty@hundin.mysql.fi
monty@work.mysql.com
mwagner@evoq.mwagner.org
paul@central.snake.net
+paul@teton.kitebird.com
sasha@mysql.sashanet.com
+serg@serg.mysql.com
tonu@hundin.mysql.fi
File mode changed from 100755 to 100644 (repeated for 20 files in this merge)
[latvia.ps: PostScript EPSF-2.0 image generated by pnmtops, BoundingBox 295 365 317 396, 32x22 pixel RGB colorimage; hex pixel data omitted]
File mode changed from 100755 to 100644 (repeated for 10 files in this merge)
@@ -2137,7 +2137,7 @@ The server can provide error messages to clients in many languages.
@item
Clients may connect to the @strong{MySQL} server using TCP/IP Sockets,
-Unix Sockets (Unixes), or Named Pipes (NT).
+Unix Sockets (Unix), or Named Pipes (NT).
@item
The @strong{MySQL}-specific @code{SHOW} command can be used to retrieve
@@ -5249,7 +5249,7 @@ clients can connect to both @strong{MySQL} versions.
The extended @strong{MySQL} binary distribution is marked with the
@code{-max} suffix and is configured with the same options as
-@code{mysqld-max}. @xref{mysqld-max}.
+@code{mysqld-max}. @xref{mysqld-max, @code{mysqld-max}}.
If you want to use the @code{MySQL-Max} RPM, you must first
install the standard @code{MySQL} RPM.
@@ -5590,8 +5590,8 @@ indicates the type of operating system for which the distribution is intended
@item
If you see a binary distribution marked with the @code{-max} prefix, this
means that the binary has support for transaction-safe tables and other
-features. @xref{mysqld-max}. Note that all binaries are built from
-the same @strong{MySQL} source distribution.
+features. @xref{mysqld-max, @code{mysqld-max}}. Note that all binaries
+are built from the same @strong{MySQL} source distribution.
@item
Add a user and group for @code{mysqld} to run as:
@@ -5603,8 +5603,8 @@ shell> useradd -g mysql mysql
These commands add the @code{mysql} group and the @code{mysql} user. The
syntax for @code{useradd} and @code{groupadd} may differ slightly on different
-Unixes. They may also be called @code{adduser} and @code{addgroup}. You may
-wish to call the user and group something else instead of @code{mysql}.
+versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
+You may wish to call the user and group something else instead of @code{mysql}.
@item
Change into the intended installation directory:
@@ -5647,7 +5647,8 @@ programs properly. @xref{Environment variables}.
@item scripts
This directory contains the @code{mysql_install_db} script used to initialize
-the server access permissions.
+the @code{mysql} database containing the grant tables that store the server
+access permissions.
@end table
@item
@@ -5713,7 +5714,7 @@ You can start the @strong{MySQL} server with the following command:
shell> bin/safe_mysqld --user=mysql &
@end example
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
@xref{Post-installation}.
@@ -6117,8 +6118,8 @@ shell> useradd -g mysql mysql
These commands add the @code{mysql} group, and the @code{mysql} user. The
syntax for @code{useradd} and @code{groupadd} may differ slightly on different
-Unixes. They may also be called @code{adduser} and @code{addgroup}. You may
-wish to call the user and group something else instead of @code{mysql}.
+versions of Unix. They may also be called @code{adduser} and @code{addgroup}.
+You may wish to call the user and group something else instead of @code{mysql}.
@item
Unpack the distribution into the current directory:
@@ -7672,13 +7673,13 @@ To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV
signal, you can start @code{mysqld} with the @code{--core-file} option. Note
that you also probably need to raise the @code{core file size} by adding
@code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld}
-with @code{--core-file-sizes=1000000}. @xref{safe_mysqld}.
+with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}.
To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can
start @code{mysqld} with the @code{--core-file} option. Note that you also probably
need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to
@code{safe_mysqld} or starting @code{safe_mysqld} with
-@code{--core-file-sizes=1000000}. @xref{safe_mysqld}.
+@code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}.
If you are linking your own @strong{MySQL} client and get the error:
@@ -8006,7 +8007,7 @@ shell> nohup mysqld [options] &
@code{nohup} causes the command following it to ignore any @code{SIGHUP}
signal sent from the terminal. Alternatively, start the server by running
@code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you.
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
If you get a problem when compiling mysys/get_opt.c, just remove the
line #define _NO_PROTO from the start of that file!
@@ -8263,7 +8264,8 @@ FreeBSD is also known to have a very low default file handle limit.
safe_mysqld or raise the limits for the @code{mysqld} user in /etc/login.conf
(and rebuild it with cap_mkdb /etc/login.conf). Also be sure you set the
appropriate class for this user in the password file if you are not
-using the default (use: chpass mysqld-user-name). @xref{safe_mysqld}.
+using the default (use: chpass mysqld-user-name). @xref{safe_mysqld,
+@code{safe_mysqld}}.
If you get problems with the current date in @strong{MySQL}, setting the
@code{TZ} variable will probably help. @xref{Environment variables}.
@@ -9679,7 +9681,7 @@ mysqld: Can't find file: 'host.frm'
The above may also happen with a binary @strong{MySQL} distribution if you
don't start @strong{MySQL} by executing exactly @code{./bin/safe_mysqld}!
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
You might need to run @code{mysql_install_db} as @code{root}. However,
if you prefer, you can run the @strong{MySQL} server as an unprivileged
@@ -9980,7 +9982,8 @@ system startup and shutdown, and is described more fully in
@item
By invoking @code{safe_mysqld}, which tries to determine the proper options
-for @code{mysqld} and then runs it with those options. @xref{safe_mysqld}.
+for @code{mysqld} and then runs it with those options. @xref{safe_mysqld,
+@code{safe_mysqld}}.
@item
On NT you should install @code{mysqld} as a service as follows:
@@ -10229,7 +10232,8 @@ though.
@item --core-file
Write a core file if @code{mysqld} dies. For some systems you must also
-specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld}.
+specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld,
+@code{safe_mysqld}}.
@item -h, --datadir=path
Path to the database root.
@@ -24295,6 +24299,14 @@ tables are:
@item Tables are compressed with @code{pack_isam} rather than with @code{myisampack}.
@end itemize
+If you want to convert an @code{ISAM} table to a @code{MyISAM} table so
+that you can use utilities such as @code{mysqlcheck}, use an @code{ALTER
+TABLE} statement:
+@example
+mysql> ALTER TABLE tbl_name TYPE = MYISAM;
+@end example
@cindex tables, @code{HEAP}
@node HEAP, BDB, ISAM, Table types
@section HEAP Tables
@@ -24422,7 +24434,7 @@ this. @xref{Table handler support}.
If you have downloaded a binary version of @strong{MySQL} that includes
support for BerkeleyDB, simply follow the instructions for installing a
binary version of @strong{MySQL}.
-@xref{Installing binary}. @xref{mysqld-max}.
+@xref{Installing binary}. @xref{mysqld-max, @code{mysqld-max}}.
To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL}
Version 3.23.34 or newer and configure @code{MySQL} with the
@@ -25534,7 +25546,7 @@ binary.
If you have downloaded a binary version of @strong{MySQL} that includes
support for InnoDB (mysqld-max), simply follow the instructions for
installing a binary version of @strong{MySQL}. @xref{Installing binary}.
-@xref{mysqld-max}.
+@xref{mysqld-max, @code{mysqld-max}}.
To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer
and configure @code{MySQL} with the @code{--with-innodb} option.
@@ -26309,7 +26321,7 @@ time will be longer.
Also the log buffer should be quite big, say 8 MB.
@strong{6.} (Relevant from 3.23.39 up.)
-In some versions of Linux and other Unixes flushing files to disk with the Unix
+In some versions of Linux and Unix, flushing files to disk with the Unix
@code{fdatasync} and other similar methods is surprisingly slow.
The default method InnoDB uses is the @code{fdatasync} function.
If you are not satisfied with the database write performance, you may
@@ -26590,11 +26602,11 @@ integer that can be stored in the specified integer type.
In disk i/o InnoDB uses asynchronous i/o. On Windows NT
it uses the native asynchronous i/o provided by the operating system.
-On Unixes InnoDB uses simulated asynchronous i/o built
+On Unix, InnoDB uses simulated asynchronous i/o built
into InnoDB: InnoDB creates a number of i/o threads to take care
of i/o operations, such as read-ahead. In a future version we will
add support for simulated aio on Windows NT and native aio on those
-Unixes which have one.
+versions of Unix which have one.
On Windows NT InnoDB uses non-buffered i/o. That means that the disk
pages InnoDB reads or writes are not buffered in the operating system
@@ -26605,7 +26617,7 @@ just define the raw disk in place of a data file in @file{my.cnf}.
You must give the exact size in bytes of the raw disk in @file{my.cnf},
because at startup InnoDB checks that the size of the file
is the same as specified in the configuration file. Using a raw disk
-you can on some Unixes perform non-buffered i/o.
+you can on some versions of Unix perform non-buffered i/o.
There are two read-ahead heuristics in InnoDB: sequential read-ahead
and random read-ahead. In sequential read-ahead InnoDB notices that
@@ -26784,7 +26796,7 @@ the maximum size for a table. The minimum tablespace size is 10 MB.
Contact information of Innobase Oy, producer of the InnoDB engine:
@example
-Website: www.innobase.fi
+Website: www.innodb.com
Heikki.Tuuri@@innobase.inet.fi
phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile)
InnoDB Oy Inc.
@@ -33177,7 +33189,7 @@ with the @code{-max} prefix. This makes it very easy to test out a
another @code{mysqld} binary in an existing installation. Just
run @code{configure} with the options you want and then install the
new @code{mysqld} binary as @code{mysqld-max} in the same directory
-where your old @code{mysqld} binary is. @xref{safe_mysqld}.
+where your old @code{mysqld} binary is. @xref{safe_mysqld, @code{safe_mysqld}}.
The @code{mysqld-max} RPM uses the above mentioned @code{safe_mysqld}
feature. It just installs the @code{mysqld-max} executable and
@@ -33425,7 +33437,7 @@ MY_PWD=`pwd` Check if we are starting this relative (for the binary
release) if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys
-a -x ./bin/mysqld
--------------------------------------------------------------------------
-@xref{safe_mysqld}.
+@xref{safe_mysqld, @code{safe_mysqld}}.
@end example
The above test should be successful, or you may encounter problems.
@item
@@ -33953,7 +33965,7 @@ server). The dump will contain SQL statements to create the table
and/or populate the table.
If you are doing a backup on the server, you should consider using
-the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy}.
+the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy, @code{mysqlhotcopy}}.
@example
shell> mysqldump [OPTIONS] database [tables]
@@ -39158,7 +39170,8 @@ If you want to make a SQL level backup of a table, you can use
TABLE}. @xref{SELECT}. @xref{BACKUP TABLE}.
Another way to back up a database is to use the @code{mysqldump} program or
-the @code{mysqlhotcopy script}. @xref{mysqldump}. @xref{mysqlhotcopy}.
+the @code{mysqlhotcopy script}. @xref{mysqldump, @code{mysqldump}}.
+@xref{mysqlhotcopy, @code{mysqlhotcopy}}.
@enumerate
@item
@@ -46468,8 +46481,8 @@ read by @code{mysql_options()}.
Added new options @code{--pager[=...]}, @code{--no-pager},
@code{--tee=...} and @code{--no-tee} to the @code{mysql} client. The
new corresponding interactive commands are @code{pager}, @code{nopager},
-@code{tee} and @code{notee}. @xref{mysql}, @code{mysql --help} and the
-interactive help for more information.
+@code{tee} and @code{notee}. @xref{mysql, @code{mysql}}, @code{mysql --help}
+and the interactive help for more information.
@item
Fixed crash when automatic repair of @code{MyISAM} table failed.
@item
@@ -69,10 +69,6 @@
# define memmove(d, s, n) bmove((d), (s), (n)) /* our bmove */
#endif
-#if defined(HAVE_STPCPY) && !defined(HAVE_mit_thread)
-#define strmov(A,B) stpcpy((A),(B))
-#endif
/* Unixware 7 */
#if !defined(HAVE_BFILL)
# define bfill(A,B,C) memset((A),(C),(B))
@@ -90,6 +86,13 @@
extern "C" {
#endif
+#if defined(HAVE_STPCPY) && !defined(HAVE_mit_thread)
+#define strmov(A,B) stpcpy((A),(B))
+#ifndef stpcpy
+extern char *stpcpy(char *, const char *); /* For AIX with gcc 2.95.3 */
+#endif
+#endif
extern char NEAR _dig_vec[]; /* Declared in int2str() */
#ifdef BAD_STRING_COMPILER
@@ -148,7 +151,7 @@ extern void bchange(char *dst,uint old_len,const char *src,
uint new_len,uint tot_len);
extern void strappend(char *s,uint len,pchar fill);
extern char *strend(const char *s);
extern char *strcend(const char *, pchar);
extern char *strfield(char *src,int fields,int chars,int blanks,
int tabch);
extern char *strfill(my_string s,uint len,pchar fill);
......
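A side note on the block above: the point of mapping strmov() to stpcpy() is that stpcpy() returns a pointer to the terminating NUL, so string building can be chained without re-scanning the buffer. A minimal sketch, not part of the patch; the helper name, buffer and literals are invented for illustration:

#include <string.h>			/* stpcpy(), where available */

#define strmov(A,B) stpcpy((A),(B))	/* as in the block above */

static void
build_binary_name(char* buf)		/* buf assumed large enough */
{
	char*	end = buf;

	end = strmov(end, "mysqld");	/* end points at the trailing NUL */
	end = strmov(end, "-max");	/* chained append, no strlen() pass */
}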
@@ -2238,12 +2238,93 @@ btr_check_node_ptr(
return(TRUE);
}
/****************************************************************
Checks the size and number of fields in a record based on the definition of
the index. */
static
ibool
btr_index_rec_validate(
/*====================*/
/* out: TRUE if ok */
rec_t* rec, /* in: index record */
dict_index_t* index) /* in: index */
{
dtype_t* type;
byte* data;
ulint len;
ulint n;
ulint i;
n = dict_index_get_n_fields(index);
if (rec_get_n_fields(rec) != n) {
fprintf(stderr, "Record has %lu fields, should have %lu\n",
rec_get_n_fields(rec), n);
return(FALSE);
}
for (i = 0; i < n; i++) {
data = rec_get_nth_field(rec, i, &len);
type = dict_index_get_nth_type(index, i);
if (len != UNIV_SQL_NULL && dtype_is_fixed_size(type)
&& len != dtype_get_fixed_size(type)) {
fprintf(stderr,
"Record field %lu len is %lu, should be %lu\n",
i, len, dtype_get_fixed_size(type));
return(FALSE);
}
}
return(TRUE);
}
/****************************************************************
Checks the size and number of fields in records based on the definition of
the index. */
static
ibool
btr_index_page_validate(
/*====================*/
/* out: TRUE if ok */
page_t* page, /* in: index page */
dict_index_t* index) /* in: index */
{
rec_t* rec;
page_cur_t cur;
ibool ret = TRUE;
page_cur_set_before_first(page, &cur);
page_cur_move_to_next(&cur);
for (;;) {
rec = (&cur)->rec;
if (page_cur_is_after_last(&cur)) {
break;
}
if (!btr_index_rec_validate(rec, index)) {
ret = FALSE;
}
page_cur_move_to_next(&cur);
}
return(ret);
}
/****************************************************************
Validates index tree level. */
static
-void
+ibool
btr_validate_level(
/*===============*/
+/* out: TRUE if ok */
dict_tree_t* tree, /* in: index tree */
ulint level) /* in: level number */
{
@@ -2260,7 +2341,9 @@ btr_validate_level(
page_cur_t cursor;
mem_heap_t* heap;
dtuple_t* node_ptr_tuple;
+ibool ret = TRUE;
+dict_index_t* index;
mtr_start(&mtr);
page = btr_root_get(tree, &mtr);
@@ -2278,13 +2361,31 @@ btr_validate_level(
page = btr_node_ptr_get_child(node_ptr, &mtr);
}
+index = UT_LIST_GET_FIRST(tree->tree_indexes);
/* Now we are on the desired level */
loop:
mtr_x_lock(dict_tree_get_lock(tree), &mtr);
-/* Check ordering of records */
-page_validate(page, UT_LIST_GET_FIRST(tree->tree_indexes));
+/* Check ordering etc. of records */
if (!page_validate(page, index)) {
fprintf(stderr, "Error in page %lu in index %s\n",
buf_frame_get_page_no(page), index->name);
ret = FALSE;
}
if (level == 0) {
if (!btr_index_page_validate(page, index)) {
fprintf(stderr,
"Error in page %lu in index %s\n",
buf_frame_get_page_no(page), index->name);
ret = FALSE;
}
}
ut_a(btr_page_get_level(page, &mtr) == level);
right_page_no = btr_page_get_next(page, &mtr);
@@ -2374,14 +2475,17 @@ loop:
goto loop;
}
+return(ret);
}
/******************************************************************
Checks the consistency of an index tree. */
-void
+ibool
btr_validate_tree(
/*==============*/
+/* out: TRUE if ok */
dict_tree_t* tree) /* in: tree */
{
mtr_t mtr;
@@ -2397,8 +2501,15 @@ btr_validate_tree(
for (i = 0; i <= n; i++) {
-btr_validate_level(tree, n - i);
+if (!btr_validate_level(tree, n - i)) {
+mtr_commit(&mtr);
+return(FALSE);
+}
}
mtr_commit(&mtr);
+return(TRUE);
}
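Since btr_validate_tree() now reports failure through its ibool return value instead of asserting, a caller can turn a corrupt tree into an error code. A hypothetical caller sketch, not part of this diff (check_one_index() is an invented helper; index->tree and index->name are the fields the new code above already uses):

#include <stdio.h>
#include "btr0btr.h"		/* btr_validate_tree() */
#include "dict0dict.h"		/* dict_index_t */

/* Invented helper: index is any dict_index_t* the caller already holds. */
static ibool
check_one_index(dict_index_t* index)
{
	if (!btr_validate_tree(index->tree)) {
		fprintf(stderr, "InnoDB: index %s failed tree validation\n",
			index->name);

		return(FALSE);	/* caller maps this to DB_ERROR */
	}

	return(TRUE);
}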
@@ -163,9 +163,14 @@ btr_cur_search_to_nth_level(
BTR_INSERT and BTR_ESTIMATE;
cursor->left_page is used to store a pointer
to the left neighbor page, in the cases
-BTR_SEARCH_PREV and BTR_MODIFY_PREV */
+BTR_SEARCH_PREV and BTR_MODIFY_PREV;
+NOTE that if has_search_latch
+is != 0, we maybe do not have a latch set
+on the cursor page, we assume
+the caller uses his search latch
+to protect the record! */
btr_cur_t* cursor, /* in/out: tree cursor; the cursor page is
-s- or x-latched */
+s- or x-latched, but see also above! */
ulint has_search_latch,/* in: info on the latch mode the
caller currently has on btr_search_latch:
RW_S_LATCH, or 0 */
......
@@ -601,7 +601,12 @@ btr_search_guess_on_hash(
btr_search_t* info, /* in: index search info */
dtuple_t* tuple, /* in: logical record */
ulint mode, /* in: PAGE_CUR_L, ... */
-ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
+ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
+NOTE that only if has_search_latch
+is 0, we will have a latch set on
+the cursor page, otherwise we assume
+the caller uses his search latch
+to protect the record! */
btr_cur_t* cursor, /* out: tree cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
@@ -722,7 +727,9 @@ btr_search_guess_on_hash(
}
if (!success) {
-btr_leaf_page_release(page, latch_mode, mtr);
+if (!has_search_latch) {
+btr_leaf_page_release(page, latch_mode, mtr);
+}
goto failure;
}
......
@@ -376,9 +376,10 @@ btr_print_tree(
/******************************************************************
Checks the consistency of an index tree. */
-void
+ibool
btr_validate_tree(
/*==============*/
+/* out: TRUE if ok */
dict_tree_t* tree); /* in: tree */
#define BTR_N_LEAF_PAGES 1
......
@@ -98,12 +98,18 @@ btr_cur_search_to_nth_level(
the previous page of the record! Inserts
should always be made using PAGE_CUR_LE to
search the position! */
-ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
+ulint latch_mode, /* in: BTR_SEARCH_LEAF, ..., ORed with
+BTR_INSERT and BTR_ESTIMATE;
cursor->left_page is used to store a pointer
to the left neighbor page, in the cases
-BTR_SEARCH_PREV and BTR_MODIFY_PREV */
-btr_cur_t* cursor, /* out: tree cursor; the cursor page is s- or
-x-latched */
+BTR_SEARCH_PREV and BTR_MODIFY_PREV;
+NOTE that if has_search_latch
+is != 0, we maybe do not have a latch set
+on the cursor page, we assume
+the caller uses his search latch
+to protect the record! */
+btr_cur_t* cursor, /* in/out: tree cursor; the cursor page is
+s- or x-latched, but see also above! */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
RW_S_LATCH, or 0 */
......
@@ -87,7 +87,11 @@ btr_pcur_open_with_no_init(
PAGE_CUR_LE, not PAGE_CUR_GE, as the latter
may end up on the previous page of the
record! */
-ulint latch_mode,/* in: BTR_SEARCH_LEAF, ... */
+ulint latch_mode,/* in: BTR_SEARCH_LEAF, ...;
+NOTE that if has_search_latch != 0 then
+we maybe do not acquire a latch on the cursor
+page, but assume that the caller uses his
+btr search latch to protect the record! */
btr_pcur_t* cursor, /* in: memory buffer for persistent cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
......
@@ -492,7 +492,11 @@ btr_pcur_open_with_no_init(
PAGE_CUR_LE, not PAGE_CUR_GE, as the latter
may end up on the previous page of the
record! */
-ulint latch_mode,/* in: BTR_SEARCH_LEAF, ... */
+ulint latch_mode,/* in: BTR_SEARCH_LEAF, ...;
+NOTE that if has_search_latch != 0 then
+we maybe do not acquire a latch on the cursor
+page, but assume that the caller uses his
+btr search latch to protect the record! */
btr_pcur_t* cursor, /* in: memory buffer for persistent cursor */
ulint has_search_latch,/* in: latch mode the caller
currently has on btr_search_latch:
......
@@ -229,6 +229,15 @@ row_rename_table_for_mysql(
char* old_name, /* in: old table name */
char* new_name, /* in: new table name */
trx_t* trx); /* in: transaction handle */
/*************************************************************************
Checks a table for corruption. */
ulint
row_check_table_for_mysql(
/*======================*/
/* out: DB_ERROR or DB_SUCCESS */
row_prebuilt_t* prebuilt); /* in: prebuilt struct in MySQL
handle */
/* A struct describing a place for an individual column in the MySQL
row format which is presented to the table handler in ha_innobase.
@@ -281,7 +290,8 @@ struct row_prebuilt_struct {
is set to TRUE */
dict_index_t* index; /* current index for a search, if any */
ulint template_type; /* ROW_MYSQL_WHOLE_ROW,
-ROW_MYSQL_REC_FIELDS or
+ROW_MYSQL_REC_FIELDS,
+ROW_MYSQL_DUMMY_TEMPLATE, or
ROW_MYSQL_NO_TEMPLATE */
ulint n_template; /* number of elements in the
template */
@@ -359,6 +369,8 @@ struct row_prebuilt_struct {
#define ROW_MYSQL_WHOLE_ROW 0
#define ROW_MYSQL_REC_FIELDS 1
#define ROW_MYSQL_NO_TEMPLATE 2
+#define ROW_MYSQL_DUMMY_TEMPLATE 3 /* dummy template used in
+row_scan_and_check_index */
#ifndef UNIV_NONINL
#include "row0mysql.ic"
......
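For orientation before the row0mysql.c and row0sel.c hunks below: with ROW_MYSQL_DUMMY_TEMPLATE, row_search_for_mysql() skips the MySQL row-format conversion and instead copies the raw index record into the caller's buffer, storing the record origin offset in the first four bytes. A sketch of the caller side, modelled on row_scan_and_check_index() further down; scan_with_dummy_template() is invented, and prebuilt is assumed to be otherwise set up exactly as that function does (index, search tuple, lock type):

#include "row0mysql.h"
#include "mem0mem.h"		/* mem_alloc(), mem_free() */
#include "mach0data.h"		/* mach_read_from_4() */
#include "rem0rec.h"		/* rec_get_n_fields() */

static void
scan_with_dummy_template(row_prebuilt_t* prebuilt)
{
	byte*	buf = mem_alloc(UNIV_PAGE_SIZE);
	rec_t*	rec;
	ulint	err;

	prebuilt->template_type = ROW_MYSQL_DUMMY_TEMPLATE;
	prebuilt->n_template = 0;

	err = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, 0);

	while (err == DB_SUCCESS) {
		/* the record origin offset is stored in the first 4 bytes */
		rec = buf + mach_read_from_4(buf);

		ut_a(rec_get_n_fields(rec) > 0);	/* inspect rec here */

		err = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0,
							ROW_SEL_NEXT);
	}

	mem_free(buf);
}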
@@ -24,6 +24,13 @@ saving CPU time. The kernel mutex contention is increased, however. */
extern ulint trx_n_mysql_transactions;
+/************************************************************************
+Releases the search latch if trx has reserved it. */
+void
+trx_search_latch_release_if_reserved(
+/*=================================*/
+trx_t* trx); /* in: transaction */
/********************************************************************
Retrieves the error_info field from a trx. */
@@ -282,6 +289,13 @@ struct trx_struct{
ulint n_mysql_tables_in_use; /* number of Innobase tables
used in the processing of the current
SQL statement in MySQL */
+ulint mysql_n_tables_locked;
+/* how many tables the current SQL
+statement uses, except those
+in consistent read */
+ibool has_search_latch;
+/* TRUE if this trx has latched the
+search system latch in S-mode */
ibool ignore_duplicates_in_insert;
/* in an insert roll back only insert
of the latest row in case
......
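The two new fields cooperate with the adaptive hash index: mysql_n_tables_locked lets row_search_for_mysql() recognize a non-locking consistent read, and has_search_latch records that this trx holds btr_search_latch in S-mode so it can be dropped later. A condensed sketch of the protocol, extracted from the row0sel.c and trx0trx.c hunks below (hash_read_protocol() is an invented wrapper):

#include "trx0trx.h"
#include "btr0sea.h"		/* btr_search_latch */

static void
hash_read_protocol(trx_t* trx)
{
	/* reserve the adaptive hash index latch once per transaction */
	if (!trx->has_search_latch) {
		rw_lock_s_lock(&btr_search_latch);
		trx->has_search_latch = TRUE;
	}

	/* ... consistent reads may now take the hash shortcut ... */

	/* release it again; this is a no-op if the latch was never
	reserved by this trx */
	trx_search_latch_release_if_reserved(trx);
}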
@@ -1199,8 +1199,16 @@ page_rec_validate(
n_owned = rec_get_n_owned(rec);
heap_no = rec_get_heap_no(rec);
-ut_a(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED);
-ut_a(heap_no < page_header_get_field(page, PAGE_N_HEAP));
+if (!(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED)) {
+fprintf(stderr, "Dir slot n owned too big %lu\n", n_owned);
+return(FALSE);
+}
+if (!(heap_no < page_header_get_field(page, PAGE_N_HEAP))) {
+fprintf(stderr, "Heap no too big %lu %lu\n", heap_no,
+page_header_get_field(page, PAGE_N_HEAP));
+return(FALSE);
+}
return(TRUE);
}
@@ -1216,20 +1224,21 @@ page_validate(
dict_index_t* index) /* in: data dictionary index containing
the page record type definition */
{
-page_dir_slot_t* slot;
mem_heap_t* heap;
-page_cur_t cur;
byte* buf;
ulint i;
ulint count;
ulint own_count;
ulint slot_no;
ulint data_size;
+page_cur_t cur;
rec_t* rec;
rec_t* old_rec = NULL;
+page_dir_slot_t* slot;
ulint offs;
ulint n_slots;
+ibool ret = FALSE;
heap = mem_heap_create(UNIV_PAGE_SIZE);
/* The following buffer is used to check that the
@@ -1244,8 +1253,16 @@ page_validate(
overlap. */
n_slots = page_dir_get_n_slots(page);
-ut_ad(page_header_get_ptr(page, PAGE_HEAP_TOP) <=
-page_dir_get_nth_slot(page, n_slots - 1));
+if (!(page_header_get_ptr(page, PAGE_HEAP_TOP) <=
page_dir_get_nth_slot(page, n_slots - 1))) {
fprintf(stderr,
"Record heap and dir overlap on a page in index %s, %lu, %lu\n",
index->name, page_header_get_ptr(page, PAGE_HEAP_TOP),
page_dir_get_nth_slot(page, n_slots - 1));
goto func_exit;
}
/* Validate the record list in a loop checking also that
it is consistent with the directory. */
@@ -1259,11 +1276,20 @@
for (;;) {
rec = (&cur)->rec;
-page_rec_validate(rec);
+if (!page_rec_validate(rec)) {
+goto func_exit;
+}
/* Check that the records are in the ascending order */
if ((count >= 2) && (!page_cur_is_after_last(&cur))) {
-ut_a(1 == cmp_rec_rec(rec, old_rec, index));
+if (!(1 == cmp_rec_rec(rec, old_rec, index))) {
+fprintf(stderr,
+"Records in wrong order in index %s\n",
+index->name);
+goto func_exit;
+}
}
if ((rec != page_get_supremum_rec(page))
@@ -1275,16 +1301,38 @@ page_validate(
offs = rec_get_start(rec) - page;
for (i = 0; i < rec_get_size(rec); i++) {
-ut_a(buf[offs + i] == 0); /* No other record may
-overlap this */
+if (!buf[offs + i] == 0) {
+/* No other record may overlap this */
fprintf(stderr,
"Record overlaps another in index %s \n",
index->name);
goto func_exit;
}
buf[offs + i] = 1;
}
if (rec_get_n_owned(rec) != 0) {
/* This is a record pointed to by a dir slot */
-ut_a(rec_get_n_owned(rec) == own_count);
-ut_a(page_dir_slot_get_rec(slot) == rec);
+if (rec_get_n_owned(rec) != own_count) {
+fprintf(stderr,
+"Wrong owned count %lu, %lu, in index %s\n",
+rec_get_n_owned(rec), own_count,
+index->name);
+goto func_exit;
+}
+if (page_dir_slot_get_rec(slot) != rec) {
+fprintf(stderr,
+"Dir slot does not point to right rec in %s\n",
+index->name);
+goto func_exit;
+}
page_dir_slot_check(slot);
own_count = 0;
@@ -1297,45 +1345,89 @@ page_validate(
if (page_cur_is_after_last(&cur)) {
break;
}
-count++;
+if (rec_get_next_offs(rec) < FIL_PAGE_DATA
|| rec_get_next_offs(rec) >= UNIV_PAGE_SIZE) {
fprintf(stderr,
"Next record offset wrong %lu in index %s\n",
rec_get_next_offs(rec), index->name);
goto func_exit;
}
count++;
page_cur_move_to_next(&cur);
own_count++;
old_rec = rec;
}
-ut_a(rec_get_n_owned(rec) != 0);
-ut_a(slot_no == n_slots - 1);
-ut_a(page_header_get_field(page, PAGE_N_RECS) + 2 == count + 1);
+if (rec_get_n_owned(rec) == 0) {
+fprintf(stderr, "n owned is zero in index %s\n", index->name);
+goto func_exit;
+}
+if (slot_no != n_slots - 1) {
+fprintf(stderr, "n slots wrong %lu %lu in index %s\n",
+slot_no, n_slots - 1, index->name);
+goto func_exit;
+}
+if (page_header_get_field(page, PAGE_N_RECS) + 2 != count + 1) {
+fprintf(stderr, "n recs wrong %lu %lu in index %s\n",
+page_header_get_field(page, PAGE_N_RECS) + 2, count + 1,
+index->name);
+goto func_exit;
+}
if (data_size != page_get_data_size(page)) {
-printf("Summed data size %lu, returned by func %lu\n",
+fprintf(stderr, "Summed data size %lu, returned by func %lu\n",
data_size, page_get_data_size(page));
-ut_error;
+goto func_exit;
}
/* Check then the free list */
rec = page_header_get_ptr(page, PAGE_FREE);
while (rec != NULL) {
-page_rec_validate(rec);
+if (!page_rec_validate(rec)) {
+goto func_exit;
+}
count++;
offs = rec_get_start(rec) - page;
for (i = 0; i < rec_get_size(rec); i++) {
-ut_a(buf[offs + i] == 0);
if (buf[offs + i] != 0) {
fprintf(stderr,
"Record overlaps another in free list, index %s \n",
index->name);
goto func_exit;
}
buf[offs + i] = 1;
}
rec = page_rec_get_next(rec);
}
-ut_a(page_header_get_field(page, PAGE_N_HEAP) == count + 1);
+if (page_header_get_field(page, PAGE_N_HEAP) != count + 1) {
+fprintf(stderr, "N heap is wrong %lu %lu in index %s\n",
+page_header_get_field(page, PAGE_N_HEAP), count + 1,
+index->name);
+}
+ret = TRUE;
+func_exit:
mem_heap_free(heap);
-return(TRUE);
+return(ret);
}
/*******************************************************************
......
@@ -177,7 +177,9 @@ cmp_whole_field(
(int)(type->prtype & ~DATA_NOT_NULL),
a, a_length, b, b_length));
default:
-assert(0);
+fprintf(stderr,
+"InnoDB: unknown type number %lu\n", data_type);
+ut_a(0);
}
return(0);
......
@@ -1129,3 +1129,146 @@ funct_exit:
return((int) err);
}
/*************************************************************************
Checks that the index contains entries in an ascending order, unique
constraint is not broken, and calculates the number of index entries
in the read view of the current transaction. */
static
ibool
row_scan_and_check_index(
/*=====================*/
/* out: TRUE if ok */
row_prebuilt_t* prebuilt, /* in: prebuilt struct in MySQL */
dict_index_t* index, /* in: index */
ulint* n_rows) /* out: number of entries seen in the
current consistent read */
{
mem_heap_t* heap;
dtuple_t* prev_entry = NULL;
ulint matched_fields;
ulint matched_bytes;
byte* buf;
ulint ret;
rec_t* rec;
ibool is_ok = TRUE;
int cmp;
*n_rows = 0;
buf = mem_alloc(UNIV_PAGE_SIZE);
heap = mem_heap_create(100);
/* Make a dummy template in prebuilt, which we will use
in scanning the index entries */
prebuilt->index = index;
prebuilt->sql_stat_start = TRUE;
prebuilt->template_type = ROW_MYSQL_DUMMY_TEMPLATE;
prebuilt->n_template = 0;
prebuilt->need_to_access_clustered = FALSE;
dtuple_set_n_fields(prebuilt->search_tuple, 0);
prebuilt->select_lock_type = LOCK_NONE;
ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, 0);
loop:
if (ret != DB_SUCCESS) {
mem_free(buf);
mem_heap_free(heap);
return(is_ok);
}
*n_rows = *n_rows + 1;
/* row_search... returns the index record in buf, record origin offset
within buf stored in the first 4 bytes, because we have built a dummy
template */
rec = buf + mach_read_from_4(buf);
if (prev_entry != NULL) {
matched_fields = 0;
matched_bytes = 0;
cmp = cmp_dtuple_rec_with_match(prev_entry, rec,
&matched_fields,
&matched_bytes);
if (cmp > 0) {
fprintf(stderr,
"Error: index records in a wrong order in index %s\n",
index->name);
is_ok = FALSE;
} else if ((index->type & DICT_UNIQUE)
&& matched_fields >=
dict_index_get_n_ordering_defined_by_user(index)) {
fprintf(stderr,
"Error: duplicate key in index %s\n",
index->name);
is_ok = FALSE;
}
}
mem_heap_empty(heap);
prev_entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);
ret = row_search_for_mysql(buf, PAGE_CUR_G, prebuilt, 0, ROW_SEL_NEXT);
goto loop;
}
/*************************************************************************
Checks a table for corruption. */
ulint
row_check_table_for_mysql(
/*======================*/
/* out: DB_ERROR or DB_SUCCESS */
row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL
handle */
{
dict_table_t* table = prebuilt->table;
dict_index_t* index;
ulint n_rows;
ulint n_rows_in_table;
ulint ret = DB_SUCCESS;
index = dict_table_get_first_index(table);
while (index != NULL) {
/* fprintf(stderr, "Validating index %s\n", index->name); */
if (!btr_validate_tree(index->tree)) {
ret = DB_ERROR;
} else {
if (!row_scan_and_check_index(prebuilt,
index, &n_rows)) {
ret = DB_ERROR;
}
/* fprintf(stderr, "%lu entries in index %s\n", n_rows,
index->name); */
if (index == dict_table_get_first_index(table)) {
n_rows_in_table = n_rows;
} else if (n_rows != n_rows_in_table) {
ret = DB_ERROR;
fprintf(stderr,
"Error: index %s contains %lu entries, should be %lu\n",
index->name, n_rows, n_rows_in_table);
}
}
index = dict_table_get_next_index(index);
}
return(ret);
}
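A hypothetical wrapper around the new entry point, not part of this patch: as the code above shows, DB_ERROR covers three cases -- an index tree that fails btr_validate_tree(), a scan that finds out-of-order or duplicate entries, and a secondary index whose row count disagrees with the clustered index.

#include "row0mysql.h"

/* Invented wrapper; prebuilt is the row_prebuilt_t* already associated
with the open table. */
static ibool
table_is_consistent(row_prebuilt_t* prebuilt)
{
	if (row_check_table_for_mysql(prebuilt) != DB_SUCCESS) {

		return(FALSE);	/* SQL layer should report corruption */
	}

	return(TRUE);
}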
@@ -2341,6 +2341,65 @@ row_sel_push_cache_row_for_mysql(
prebuilt->n_fetch_cached++;
}
/*************************************************************************
Tries to do a shortcut to fetch a clustered index record with a unique key,
using the hash index if possible (not always). We assume that the search
mode is PAGE_CUR_GE, it is a consistent read, trx has already a read view,
btr search latch has been locked in S-mode. */
static
ulint
row_sel_try_search_shortcut_for_mysql(
/*==================================*/
/* out: SEL_FOUND, SEL_EXHAUSTED, SEL_RETRY */
rec_t** out_rec,/* out: record if found */
row_prebuilt_t* prebuilt,/* in: prebuilt struct */
mtr_t* mtr) /* in: started mtr */
{
dict_index_t* index = prebuilt->index;
dtuple_t* search_tuple = prebuilt->search_tuple;
btr_pcur_t* pcur = prebuilt->pcur;
trx_t* trx = prebuilt->trx;
rec_t* rec;
ut_ad(index->type & DICT_CLUSTERED);
btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, pcur,
RW_S_LATCH, mtr);
rec = btr_pcur_get_rec(pcur);
if (!page_rec_is_user_rec(rec)) {
return(SEL_RETRY);
}
/* As the cursor is now placed on a user record after a search with
the mode PAGE_CUR_GE, the up_match field in the cursor tells how many
fields in the user record matched to the search tuple */
if (btr_pcur_get_up_match(pcur) < dtuple_get_n_fields(search_tuple)) {
return(SEL_EXHAUSTED);
}
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
if (!lock_clust_rec_cons_read_sees(rec, index, trx->read_view)) {
return(SEL_RETRY);
}
if (rec_get_deleted_flag(rec)) {
return(SEL_EXHAUSTED);
}
*out_rec = rec;
return(SEL_FOUND);
}
/************************************************************************
Searches for rows in the database. This is used in the interface to
MySQL. This function opens a cursor, and also implements fetch next
@@ -2387,6 +2446,7 @@ row_search_for_mysql(
ibool cons_read_requires_clust_rec;
ibool was_lock_wait;
ulint ret;
+ulint shortcut;
ibool unique_search_from_clust_index = FALSE;
ibool mtr_has_extra_clust_latch = FALSE;
ibool moves_up = FALSE;
@@ -2452,6 +2512,8 @@ row_search_for_mysql(
mode = pcur->search_mode;
}
+mtr_start(&mtr);
if (match_mode == ROW_SEL_EXACT && index->type & DICT_UNIQUE
&& index->type & DICT_CLUSTERED
&& dtuple_get_n_fields(search_tuple)
@@ -2464,6 +2526,8 @@ row_search_for_mysql(
restore cursor position, and must return
immediately */
+mtr_commit(&mtr);
return(DB_RECORD_NOT_FOUND);
}
@@ -2472,8 +2536,51 @@ row_search_for_mysql(
mode = PAGE_CUR_GE;
unique_search_from_clust_index = TRUE;
if (trx->mysql_n_tables_locked == 0
&& !prebuilt->sql_stat_start) {
/* This is a SELECT query done as a consistent read,
and the read view has already been allocated:
let us try a search shortcut through the hash
index */
if (!trx->has_search_latch) {
rw_lock_s_lock(&btr_search_latch);
trx->has_search_latch = TRUE;
} else if (btr_search_latch.writer_is_wait_ex) {
/* There is an x-latch request waiting:
release the s-latch for a moment to reduce
starvation */
rw_lock_s_unlock(&btr_search_latch);
rw_lock_s_lock(&btr_search_latch);
}
shortcut = row_sel_try_search_shortcut_for_mysql(&rec,
prebuilt, &mtr);
if (shortcut == SEL_FOUND) {
row_sel_store_mysql_rec(buf, prebuilt, rec);
mtr_commit(&mtr);
return(DB_SUCCESS);
} else if (shortcut == SEL_EXHAUSTED) {
mtr_commit(&mtr);
return(DB_RECORD_NOT_FOUND);
}
}
}
if (trx->has_search_latch) {
rw_lock_s_unlock(&btr_search_latch);
trx->has_search_latch = FALSE;
}
/* Note that if the search mode was GE or G, then the cursor /* Note that if the search mode was GE or G, then the cursor
naturally moves upward (in fetch next) in alphabetical order, naturally moves upward (in fetch next) in alphabetical order,
otherwise downward */ otherwise downward */
...@@ -2485,8 +2592,6 @@ row_search_for_mysql( ...@@ -2485,8 +2592,6 @@ row_search_for_mysql(
} else if (direction == ROW_SEL_NEXT) { } else if (direction == ROW_SEL_NEXT) {
moves_up = TRUE; moves_up = TRUE;
} }
mtr_start(&mtr);
thr = que_fork_get_first_thr(prebuilt->sel_graph); thr = que_fork_get_first_thr(prebuilt->sel_graph);
...@@ -2711,7 +2816,9 @@ rec_loop: ...@@ -2711,7 +2816,9 @@ rec_loop:
if (prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD if (prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD
&& !prebuilt->templ_contains_blob && !prebuilt->templ_contains_blob
&& prebuilt->select_lock_type == LOCK_NONE && prebuilt->select_lock_type == LOCK_NONE
&& !prebuilt->clust_index_was_generated) { && !prebuilt->clust_index_was_generated
&& prebuilt->template_type
!= ROW_MYSQL_DUMMY_TEMPLATE) {
/* Inside an update, for example, we do not cache rows, /* Inside an update, for example, we do not cache rows,
since we may use the cursor position to do the actual since we may use the cursor position to do the actual
...@@ -2726,7 +2833,13 @@ rec_loop: ...@@ -2726,7 +2833,13 @@ rec_loop:
goto next_rec; goto next_rec;
} else { } else {
row_sel_store_mysql_rec(buf, prebuilt, rec); if (prebuilt->template_type == ROW_MYSQL_DUMMY_TEMPLATE) {
ut_memcpy(buf + 4, rec - rec_get_extra_size(rec),
rec_get_size(rec));
mach_write_to_4(buf, rec_get_extra_size(rec) + 4);
} else {
row_sel_store_mysql_rec(buf, prebuilt, rec);
}
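/* In the ROW_MYSQL_DUMMY_TEMPLATE branch above the record is handed back
in raw InnoDB format instead of being converted to MySQL row format:
bytes 0..3 of buf receive the offset of the record origin within the
copy (extra size + 4), and the complete physical record, extra bytes
followed by the data bytes, is copied to buf + 4. */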
if (prebuilt->clust_index_was_generated) { if (prebuilt->clust_index_was_generated) {
row_sel_store_row_id_to_prebuilt(prebuilt, index_rec, row_sel_store_row_id_to_prebuilt(prebuilt, index_rec,
......
...@@ -22,6 +22,7 @@ Created 3/26/1996 Heikki Tuuri ...@@ -22,6 +22,7 @@ Created 3/26/1996 Heikki Tuuri
#include "read0read.h" #include "read0read.h"
#include "srv0srv.h" #include "srv0srv.h"
#include "thr0loc.h" #include "thr0loc.h"
#include "btr0sea.h"
/* Dummy session used currently in MySQL interface */ /* Dummy session used currently in MySQL interface */
sess_t* trx_dummy_sess = NULL; sess_t* trx_dummy_sess = NULL;
...@@ -63,6 +64,7 @@ trx_create( ...@@ -63,6 +64,7 @@ trx_create(
trx->dict_operation = FALSE; trx->dict_operation = FALSE;
trx->n_mysql_tables_in_use = 0; trx->n_mysql_tables_in_use = 0;
trx->mysql_n_tables_locked = 0;
trx->ignore_duplicates_in_insert = FALSE; trx->ignore_duplicates_in_insert = FALSE;
...@@ -96,6 +98,8 @@ trx_create( ...@@ -96,6 +98,8 @@ trx_create(
trx->lock_heap = mem_heap_create_in_buffer(256); trx->lock_heap = mem_heap_create_in_buffer(256);
UT_LIST_INIT(trx->trx_locks); UT_LIST_INIT(trx->trx_locks);
trx->has_search_latch = FALSE;
trx->read_view_heap = mem_heap_create(256); trx->read_view_heap = mem_heap_create(256);
trx->read_view = NULL; trx->read_view = NULL;
...@@ -132,6 +136,21 @@ trx_allocate_for_mysql(void) ...@@ -132,6 +136,21 @@ trx_allocate_for_mysql(void)
return(trx); return(trx);
} }
/************************************************************************
Releases the search latch if trx has reserved it. */
void
trx_search_latch_release_if_reserved(
/*=================================*/
trx_t* trx) /* in: transaction */
{
if (trx->has_search_latch) {
rw_lock_s_unlock(&btr_search_latch);
trx->has_search_latch = FALSE;
}
}
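A minimal sketch of the intended use, with a hypothetical caller name (the real call sites live in the MySQL interface code and are not part of this hunk): code that may have taken the adaptive hash index latch through the search shortcut should release it before returning control to MySQL or blocking, so that waiting x-latch requests are not starved.
/*************************************************************************
Hypothetical illustration only: release the latch before giving control
back to MySQL. */
void
example_return_control_to_mysql(
/*============================*/
	trx_t*	trx)	/* in: transaction owned by this MySQL thread */
{
	/* Do not keep btr_search_latch s-latched while the thread may
	block on the MySQL side */
	trx_search_latch_release_if_reserved(trx);
}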
/************************************************************************ /************************************************************************
Frees a transaction object. */ Frees a transaction object. */
...@@ -149,6 +168,7 @@ trx_free( ...@@ -149,6 +168,7 @@ trx_free(
ut_a(trx->update_undo == NULL); ut_a(trx->update_undo == NULL);
ut_a(trx->n_mysql_tables_in_use == 0); ut_a(trx->n_mysql_tables_in_use == 0);
ut_a(trx->mysql_n_tables_locked == 0);
if (trx->undo_no_arr) { if (trx->undo_no_arr) {
trx_undo_arr_free(trx->undo_no_arr); trx_undo_arr_free(trx->undo_no_arr);
...@@ -160,6 +180,8 @@ trx_free( ...@@ -160,6 +180,8 @@ trx_free(
ut_a(trx->wait_lock == NULL); ut_a(trx->wait_lock == NULL);
ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0); ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0);
ut_a(!trx->has_search_latch);
if (trx->lock_heap) { if (trx->lock_heap) {
mem_heap_free(trx->lock_heap); mem_heap_free(trx->lock_heap);
} }
......
...@@ -144,7 +144,7 @@ test.t1 optimize error The handler for the table doesn't support check/repair ...@@ -144,7 +144,7 @@ test.t1 optimize error The handler for the table doesn't support check/repair
a a
2 2
Table Op Msg_type Msg_text Table Op Msg_type Msg_text
test.t1 check error The handler for the table doesn't support check/repair test.t1 check status OK
a b a b
2 testing 2 testing
Table Op Msg_type Msg_text Table Op Msg_type Msg_text
......
...@@ -1609,7 +1609,7 @@ select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 grou ...@@ -1609,7 +1609,7 @@ select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 grou
# #
select sum(Period)/count(*) from t1; select sum(Period)/count(*) from t1;
select companynr,count(price) as "count",sum(price) as "sum" ,sum(price)/count(price)-avg(price) as "diff",(0+count(price))*companynr as func from t3 group by companynr; select companynr,count(price) as "count",sum(price) as "sum" ,abs(sum(price)/count(price)-avg(price)) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg; select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg;
# #
......
...@@ -101,7 +101,7 @@ my_bool set_changeable_var(my_string str,CHANGEABLE_VAR *vars) ...@@ -101,7 +101,7 @@ my_bool set_changeable_var(my_string str,CHANGEABLE_VAR *vars)
} }
if (num < (longlong) found->min_value) if (num < (longlong) found->min_value)
num=(longlong) found->min_value; num=(longlong) found->min_value;
else if (num > (longlong) (ulong) found->max_value) else if (num > 0 && (ulonglong) num > (ulonglong) (ulong) found->max_value)
num=(longlong) (ulong) found->max_value; num=(longlong) (ulong) found->max_value;
num=((num- (longlong) found->sub_size) / (ulonglong) found->block_size); num=((num- (longlong) found->sub_size) / (ulonglong) found->block_size);
(*found->varptr)= (long) (num*(ulonglong) found->block_size); (*found->varptr)= (long) (num*(ulonglong) found->block_size);
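The new max-value check compares in unsigned arithmetic, but only after confirming that num is positive; without that guard a negative num would be reinterpreted as a huge unsigned value and wrongly clamped up to max_value. A minimal standalone sketch of the hazard (illustrative values only, not the CHANGEABLE_VAR code itself):
#include <stdio.h>

typedef long long longlong;
typedef unsigned long long ulonglong;

int main(void)
{
  longlong  num       = -5;             /* a value that is legitimately negative */
  ulonglong max_value = 4294967295ULL;  /* an upper bound taken from a ulong */

  /* Without the guard: -5 converts to 18446744073709551611, which is
     larger than the bound, so the value would be "clamped" up to max. */
  printf("unguarded comparison: %d\n", (ulonglong) num > max_value);

  /* With the guard used in set_changeable_var: negative values are
     left alone and only positive values are compared as unsigned. */
  printf("guarded comparison:   %d\n", num > 0 && (ulonglong) num > max_value);
  return 0;
}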
......
...@@ -266,6 +266,8 @@ int tree_delete(TREE *tree, void *key) ...@@ -266,6 +266,8 @@ int tree_delete(TREE *tree, void *key)
} }
if (remove_colour == BLACK) if (remove_colour == BLACK)
rb_delete_fixup(tree,parent); rb_delete_fixup(tree,parent);
if (tree->free)
(*tree->free)(ELEMENT_KEY(tree,element));
my_free((gptr) element,MYF(0)); my_free((gptr) element,MYF(0));
tree->elements_in_tree--; tree->elements_in_tree--;
return 0; return 0;
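The added lines plug a leak in tree_delete(): when the TREE was created with a free function for its elements, that function is now invoked on the stored key before the node itself is released. A generic sketch of the same pattern, using hypothetical types rather than the real TREE API:
#include <stdlib.h>

typedef struct node_t {
  struct node_t *next;
  void          *key;
} node_t;

typedef struct list_t {
  node_t *head;
  void  (*free_key)(void *key);   /* optional destructor supplied by the user */
} list_t;

/* Unlink and release the first node; destroy its key first so any
   memory owned by the key is not leaked, as tree_delete now does. */
void list_pop(list_t *list)
{
  node_t *node = list->head;
  if (node == NULL)
    return;
  list->head = node->next;
  if (list->free_key)
    (*list->free_key)(node->key);
  free(node);
}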
......
# This file describes how to run MySQL benchmarks with Postgres # This file describes how to run MySQL benchmarks with PostgreSQL
# #
# WARNING:
#
# Don't run the --fast test on a PostgreSQL 7.1.1 database on
# which you have any critical data; during one of our test runs
# PostgreSQL got a corrupted database and all data was destroyed!
# (When we tried to restart postmaster, it died with a
# 'no such file or directory' error and never recovered from that!)
#
# WARNING
# The test was run on an Intel Xeon 2x 550 MHz machine with 1G memory, # The test was run on an Intel Xeon 2x 550 MHz machine with 1G memory,
# 9G hard disk. The OS is Suse 6.4, with Linux 2.2.14 compiled with SMP # 9G hard disk. The OS is Suse 6.4, with Linux 2.2.14 compiled with SMP
...@@ -8,49 +17,52 @@ ...@@ -8,49 +17,52 @@
# on the same machine. No other cpu intensive process was used during # on the same machine. No other cpu intensive process was used during
# the benchmark. # the benchmark.
# # First, install postgresql-7.1.1.tar.gz
#
# First, install postgresql-7.0.2.tar.gz
#
# # Adding the following lines to your ~/.bash_profile or
# Start by adding the following lines to your ~/.bash_profile or
# corresponding file. If you are using csh, use ´setenv´. # corresponding file. If you are using csh, use ´setenv´.
# #
export POSTGRES_INCLUDE=/usr/local/pgsql/include export POSTGRES_INCLUDE=/usr/local/pg/include
export POSTGRES_LIB=/usr/local/pgsql/lib export POSTGRES_LIB=/usr/local/pg/lib
PATH=$PATH:/usr/local/pgsql/bin PATH=$PATH:/usr/local/pg/bin
MANPATH=$MANPATH:/usr/local/pgsql/man MANPATH=$MANPATH:/usr/local/pg/man
# #
# Add the following line to /etc/ld.so.conf: # Add the following line to /etc/ld.so.conf:
# #
/usr/local/pgsql/lib /usr/local/pg/lib
and run ldconfig.
# # and run:
# untar the postgres source distribution and cd to src/
# run the following commands: ldconfig
#
./configure # untar the postgres source distribution, cd to postgresql-*
# and run the following commands:
CFLAGS=-O3 ./configure
gmake gmake
gmake install gmake install
mkdir /usr/local/pgsql/data mkdir /usr/local/pg/data
chown postgres /usr/local/pgsql/data chown postgres /usr/local/pg/data
su - postgres su - postgres
/usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data /usr/local/pg/bin/initdb -D /usr/local/pg/data
su postgres -c "/usr/local/pgsql/bin/postmaster -o -F -D /usr/local/pgsql/data" & /usr/local/pg/bin/postmaster -o -F -D /usr/local/pg/data &
su postgres -c "/usr/local/pgsql/bin/createdb test" /usr/local/pg/bin/createdb test
exit
# #
# Second, install packages DBD-Pg-0.95.tar.gz and DBI-1.14.tar.gz, # Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.14.tar.gz,
# available from http://www.perl.com/CPAN/ # available from http://www.perl.com/CPAN/
#
export POSTGRES_LIB=/usr/local/pg/lib/
export POSTGRES_INCLUDE=/usr/local/pg/include/postgresql
perl Makefile.PL
make
make install
# #
# Now we run the test that can be found in the sql-bench directory in the # Now we run the test that can be found in the sql-bench directory in the
...@@ -59,17 +71,20 @@ su postgres -c "/usr/local/pgsql/bin/createdb test" ...@@ -59,17 +71,20 @@ su postgres -c "/usr/local/pgsql/bin/createdb test"
# We did run two tests: # We did run two tests:
# The standard test # The standard test
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
# and a test where we do a vacuum() after each update. # and a test where we do a vacuum() after each update.
# (The time for vacuum() is counted in the book-keeping() column) # (The time for vacuum() is counted in the book-keeping() column)
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
# If you want to store the results in an output/RUN-xxx file, you should # If you want to store the results in an output/RUN-xxx file, you should
# repeat the benchmark with the extra option --log --use-old-result # repeat the benchmark with the extra option --log --use-old-result
# This will create the RUN file based on the previous results # This will create the RUN file based on the previous results
#
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
# Between running the different tests we dropped and recreated the PostgreSQL
# database to ensure that PostgreSQL got a clean start, independent of the
# previous runs.
Testing server 'PostgreSQL version ???' at 2001-06-03 4:40:22
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Inserting data
Time to insert (9768): 8 wallclock secs ( 2.97 usr 0.28 sys + 0.00 cusr 0.00 csys = 3.25 CPU)
Retrieving data
Time for select_simple_join (500): 3 wallclock secs ( 0.74 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.77 CPU)
Time for select_join (100): 4 wallclock secs ( 0.52 usr 0.13 sys + 0.00 cusr 0.00 csys = 0.65 CPU)
Time for select_key_prefix_join (100): 11 wallclock secs ( 4.30 usr 0.16 sys + 0.00 cusr 0.00 csys = 4.46 CPU)
Time for select_distinct (800): 22 wallclock secs ( 1.95 usr 0.18 sys + 0.00 cusr 0.00 csys = 2.13 CPU)
Time for select_group (2600): 52 wallclock secs ( 1.43 usr 0.19 sys + 0.00 cusr 0.00 csys = 1.62 CPU)
Removing tables
Time to drop_table (28): 1 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Total time: 101 wallclock secs (11.93 usr 0.98 sys + 0.00 cusr 0.00 csys = 12.91 CPU)
Benchmark DBD suite: 2.13
Date of test: 2001-06-03 19:30:53
Running tests on: Linux 2.4.0-64GB-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F
Limits from: mysql,pg
Server version: PostgreSQL version 7.1.1
ATIS: Total time: 101 wallclock secs (11.93 usr 0.98 sys + 0.00 cusr 0.00 csys = 12.91 CPU)
alter-table: Total time: 52 wallclock secs ( 0.49 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.63 CPU)
big-tables: Total time: 1324 wallclock secs ( 8.10 usr 0.69 sys + 0.00 cusr 0.00 csys = 8.79 CPU)
connect: Total time: 555 wallclock secs (51.66 usr 14.01 sys + 0.00 cusr 0.00 csys = 65.67 CPU)
create: Total time: 10008 wallclock secs (27.97 usr 5.83 sys + 0.00 cusr 0.00 csys = 33.80 CPU)
insert: Estimated total time: 199360 wallclock secs (879.85 usr 202.59 sys + 0.00 cusr 0.00 csys = 1082.45 CPU)
select: Estimated total time: 13197 wallclock secs (68.30 usr 8.18 sys + 0.00 cusr 0.00 csys = 76.48 CPU)
wisconsin: Total time: 52 wallclock secs (12.40 usr 2.23 sys + 0.00 cusr 0.00 csys = 14.63 CPU)
All 8 test executed successfully
Tests with estimated time have a + at end of line
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 49.00 0.26 0.06 0.32 992
connect 143.00 8.01 1.89 9.90 10000
connect+select_1_row 195.00 10.94 2.31 13.25 10000
connect+select_simple 157.00 10.42 2.41 12.83 10000
count 131.00 0.03 0.00 0.03 100
count_distinct 132.00 0.31 0.06 0.37 1000
count_distinct_2 213.00 0.37 0.03 0.40 1000
count_distinct_big 266.00 7.91 0.25 8.16 120
count_distinct_group 384.00 1.07 0.08 1.15 1000
count_distinct_group_on_key 488.00 0.41 0.03 0.44 1000
count_distinct_group_on_key_parts 383.00 1.10 0.07 1.17 1000
count_distinct_key_prefix 179.00 0.28 0.07 0.35 1000
count_group_on_key_parts 331.00 1.13 0.06 1.19 1000
count_on_key 1850.00 15.78 1.99 17.77 50100 +
create+drop 3280.00 10.74 1.89 12.63 10000
create_MANY_tables 160.00 3.67 1.35 5.02 5000
create_index 1.00 0.00 0.00 0.00 8
create_key+drop 5781.00 10.70 1.53 12.23 10000
create_table 1.00 0.01 0.00 0.01 31
delete_all 2478.00 0.01 0.00 0.01 12
delete_all_many_keys 94.00 0.05 0.00 0.05 1
delete_big 0.00 0.01 0.00 0.01 1
delete_big_many_keys 93.00 0.05 0.00 0.05 128
delete_key 85.00 3.19 0.48 3.67 10000
drop_index 0.00 0.01 0.00 0.01 8
drop_table 1.00 0.01 0.00 0.01 28
drop_table_when_MANY_tables 772.00 1.29 0.28 1.57 5000
insert 353.00 104.09 24.32 128.41 350768
insert_duplicates 120.00 30.53 10.61 41.14 100000
insert_key 804.00 47.08 47.06 94.14 100000
insert_many_fields 528.00 1.12 0.21 1.33 2000
insert_select_1_key 86.00 0.00 0.00 0.00 1
insert_select_2_keys 196.00 0.00 0.00 0.00 1
min_max 60.00 0.02 0.00 0.02 60
min_max_on_key 10543.00 25.38 4.37 29.75 85000 ++
order_by_big 148.00 21.11 0.72 21.83 10
order_by_big_key 145.00 24.01 1.27 25.28 10
order_by_big_key2 132.00 21.28 0.64 21.92 10
order_by_big_key_desc 145.00 23.93 1.27 25.20 10
order_by_big_key_diff 138.00 21.30 0.56 21.86 10
order_by_big_key_prefix 133.00 21.16 0.80 21.96 10
order_by_key2_diff 7.00 1.94 0.03 1.97 500
order_by_key_prefix 4.00 1.04 0.08 1.12 500
order_by_range 4.00 1.13 0.06 1.19 500
outer_join 2539.00 0.00 0.01 0.01 10
outer_join_found 2515.00 0.00 0.00 0.00 10
outer_join_not_found 124666.00 0.00 0.00 0.00 500 +
outer_join_on_key 2307.00 0.00 0.00 0.00 10
select_1_row 6.00 2.25 0.46 2.71 10000
select_2_rows 7.00 2.77 0.38 3.15 10000
select_big 93.00 33.23 9.79 43.02 10080
select_column+column 8.00 2.78 0.41 3.19 10000
select_diff_key 0.00 0.21 0.02 0.23 500
select_distinct 22.00 1.95 0.18 2.13 800
select_group 326.00 1.47 0.20 1.67 2711
select_group_when_MANY_tables 15.00 1.57 0.78 2.35 5000
select_join 4.00 0.52 0.13 0.65 100
select_key 243.00 68.03 8.10 76.13 200000
select_key2 208.00 66.48 8.68 75.16 200000
select_key2_return_key 200.00 66.41 7.77 74.18 200000
select_key2_return_prim 204.00 64.75 7.90 72.65 200000
select_key_prefix 208.00 66.62 8.81 75.43 200000
select_key_prefix_join 11.00 4.30 0.16 4.46 100
select_key_return_key 239.00 66.86 8.37 75.23 200000
select_many_fields 795.00 6.97 0.48 7.45 2000
select_query_cache 2549.00 3.25 0.52 3.77 10000
select_query_cache2 2547.00 3.04 0.53 3.57 10000
select_range 465.00 10.41 0.63 11.04 410
select_range_key2 20341.00 4.22 0.52 4.74 25010 ++
select_range_prefix 20344.00 6.32 1.04 7.36 25010 ++
select_simple 5.00 2.73 0.30 3.03 10000
select_simple_join 3.00 0.74 0.03 0.77 500
update_big 6046.00 0.01 0.00 0.01 10
update_of_key 136.00 16.21 11.85 28.06 50000
update_of_key_big 320.00 0.16 0.09 0.25 501
update_of_primary_key_many_keys 5365.00 0.16 0.03 0.19 256
update_with_key 518.00 89.50 33.03 122.53 300000
update_with_key_prefix 186.00 30.32 15.83 46.15 100000
wisc_benchmark 16.00 3.30 0.65 3.95 114
TOTALS 224650.00 1060.42 234.52 1294.94 2551551 ++++++++
Testing server 'PostgreSQL version ???' at 2001-06-03 4:42:04
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 1 wallclock secs ( 0.21 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.29 CPU)
Time for alter_table_add (992): 49 wallclock secs ( 0.26 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.32 CPU)
Time for create_index (8): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Total time: 52 wallclock secs ( 0.49 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.63 CPU)
Testing server 'PostgreSQL version ???' at 2001-06-03 4:42:56
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 338 wallclock secs ( 3.28 usr 0.22 sys + 0.00 cusr 0.00 csys = 3.50 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 457 wallclock secs ( 3.69 usr 0.26 sys + 0.00 cusr 0.00 csys = 3.95 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 229 wallclock secs ( 0.40 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.47 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 299 wallclock secs ( 0.72 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.86 CPU)
Total time: 1324 wallclock secs ( 8.10 usr 0.69 sys + 0.00 cusr 0.00 csys = 8.79 CPU)
Testing server 'PostgreSQL version ???' at 2001-06-03 5:05:01
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 143 wallclock secs ( 8.01 usr 1.89 sys + 0.00 cusr 0.00 csys = 9.90 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 157 wallclock secs (10.42 usr 2.41 sys + 0.00 cusr 0.00 csys = 12.83 CPU)
Test simple select
Time for select_simple (10000): 5 wallclock secs ( 2.73 usr 0.30 sys + 0.00 cusr 0.00 csys = 3.03 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 195 wallclock secs (10.94 usr 2.31 sys + 0.00 cusr 0.00 csys = 13.25 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 6 wallclock secs ( 2.25 usr 0.46 sys + 0.00 cusr 0.00 csys = 2.71 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 7 wallclock secs ( 2.77 usr 0.38 sys + 0.00 cusr 0.00 csys = 3.15 CPU)
Test select with aritmetic (+)
Time for select_column+column (10000): 8 wallclock secs ( 2.78 usr 0.41 sys + 0.00 cusr 0.00 csys = 3.19 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 34 wallclock secs (11.75 usr 5.84 sys + 0.00 cusr 0.00 csys = 17.59 CPU)
Total time: 555 wallclock secs (51.66 usr 14.01 sys + 0.00 cusr 0.00 csys = 65.67 CPU)
Testing server 'PostgreSQL version ???' at 2001-06-03 5:14:17
Testing the speed of creating and droping tables
Testing with 5000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (5000): 160 wallclock secs ( 3.67 usr 1.35 sys + 0.00 cusr 0.00 csys = 5.02 CPU)
Accessing tables
Time to select_group_when_MANY_tables (5000): 15 wallclock secs ( 1.57 usr 0.78 sys + 0.00 cusr 0.00 csys = 2.35 CPU)
Testing drop
Time for drop_table_when_MANY_tables (5000): 772 wallclock secs ( 1.29 usr 0.28 sys + 0.00 cusr 0.00 csys = 1.57 CPU)
Testing create+drop
Time for create+drop (10000): 3280 wallclock secs (10.74 usr 1.89 sys + 0.00 cusr 0.00 csys = 12.63 CPU)
Time for create_key+drop (10000): 5781 wallclock secs (10.70 usr 1.53 sys + 0.00 cusr 0.00 csys = 12.23 CPU)
Total time: 10008 wallclock secs (27.97 usr 5.83 sys + 0.00 cusr 0.00 csys = 33.80 CPU)
Testing server 'PostgreSQL version ???' at 2001-06-03 8:01:05
Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 302 wallclock secs (89.07 usr 22.07 sys + 0.00 cusr 0.00 csys = 111.14 CPU)
Testing insert of duplicates
Time for insert_duplicates (100000): 120 wallclock secs (30.53 usr 10.61 sys + 0.00 cusr 0.00 csys = 41.14 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 58 wallclock secs (21.31 usr 3.95 sys + 0.00 cusr 0.00 csys = 25.26 CPU)
Time for order_by_big_key (10:3000000): 145 wallclock secs (24.01 usr 1.27 sys + 0.00 cusr 0.00 csys = 25.28 CPU)
Time for order_by_big_key_desc (10:3000000): 145 wallclock secs (23.93 usr 1.27 sys + 0.00 cusr 0.00 csys = 25.20 CPU)
Time for order_by_big_key_prefix (10:3000000): 133 wallclock secs (21.16 usr 0.80 sys + 0.00 cusr 0.00 csys = 21.96 CPU)
Time for order_by_big_key2 (10:3000000): 132 wallclock secs (21.28 usr 0.64 sys + 0.00 cusr 0.00 csys = 21.92 CPU)
Time for order_by_big_key_diff (10:3000000): 138 wallclock secs (21.30 usr 0.56 sys + 0.00 cusr 0.00 csys = 21.86 CPU)
Time for order_by_big (10:3000000): 148 wallclock secs (21.11 usr 0.72 sys + 0.00 cusr 0.00 csys = 21.83 CPU)
Time for order_by_range (500:125750): 4 wallclock secs ( 1.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.19 CPU)
Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 1.04 usr 0.08 sys + 0.00 cusr 0.00 csys = 1.12 CPU)
Time for order_by_key2_diff (500:250500): 7 wallclock secs ( 1.94 usr 0.03 sys + 0.00 cusr 0.00 csys = 1.97 CPU)
Time for select_diff_key (500:1000): 0 wallclock secs ( 0.21 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.23 CPU)
Note: Query took longer then time-limit: 600
Estimating end time based on:
210 queries in 210 loops of 5010 loops took 616 seconds
Estimated time for select_range_prefix (5010:1764): 14696 wallclock secs ( 2.62 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.10 CPU)
Note: Query took longer then time-limit: 600
Estimating end time based on:
210 queries in 210 loops of 5010 loops took 615 seconds
Estimated time for select_range_key2 (5010:1764): 14672 wallclock secs ( 1.67 usr 0.24 sys + 0.00 cusr 0.00 csys = 1.91 CPU)
Time for select_key_prefix (200000): 208 wallclock secs (66.62 usr 8.81 sys + 0.00 cusr 0.00 csys = 75.43 CPU)
Time for select_key (200000): 243 wallclock secs (68.03 usr 8.10 sys + 0.00 cusr 0.00 csys = 76.13 CPU)
Time for select_key_return_key (200000): 239 wallclock secs (66.86 usr 8.37 sys + 0.00 cusr 0.00 csys = 75.23 CPU)
Time for select_key2 (200000): 208 wallclock secs (66.48 usr 8.68 sys + 0.00 cusr 0.00 csys = 75.16 CPU)
Time for select_key2_return_key (200000): 200 wallclock secs (66.41 usr 7.77 sys + 0.00 cusr 0.00 csys = 74.18 CPU)
Time for select_key2_return_prim (200000): 204 wallclock secs (64.75 usr 7.90 sys + 0.00 cusr 0.00 csys = 72.65 CPU)
Test of compares with simple ranges
Note: Query took longer then time-limit: 600
Estimating end time based on:
2160 queries in 54 loops of 500 loops took 610 seconds
Estimated time for select_range_prefix (20000:4698): 5648 wallclock secs ( 3.70 usr 0.56 sys + 0.00 cusr 0.00 csys = 4.26 CPU)
Note: Query took longer then time-limit: 600
Estimating end time based on:
2120 queries in 53 loops of 500 loops took 601 seconds
Estimated time for select_range_key2 (20000:4611): 5669 wallclock secs ( 2.55 usr 0.28 sys + 0.00 cusr 0.00 csys = 2.83 CPU)
Time for select_group (111): 274 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Note: Query took longer then time-limit: 600
Estimating end time based on:
1320 queries in 220 loops of 2500 loops took 601 seconds
Estimated time for min_max_on_key (15000): 6829 wallclock secs ( 5.23 usr 0.91 sys + 0.00 cusr 0.00 csys = 6.14 CPU)
Time for min_max (60): 60 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU)
Time for count_on_key (100): 116 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.02 CPU)
Time for count (100): 131 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Time for count_distinct_big (20): 201 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Testing update of keys with functions
Time for update_of_key (50000): 136 wallclock secs (16.21 usr 11.85 sys + 0.00 cusr 0.00 csys = 28.06 CPU)
Time for update_of_key_big (501): 320 wallclock secs ( 0.16 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.25 CPU)
Testing update with key
Time for update_with_key (300000): 518 wallclock secs (89.50 usr 33.03 sys + 0.00 cusr 0.00 csys = 122.53 CPU)
Time for update_with_key_prefix (100000): 186 wallclock secs (30.32 usr 15.83 sys + 0.00 cusr 0.00 csys = 46.15 CPU)
Testing update of all rows
Time for update_big (10): 6046 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 2307 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 2539 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Time for outer_join_found (10:10): 2515 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Note: Query took longer then time-limit: 600
Estimating end time based on:
3 queries in 3 loops of 500 loops took 748 seconds
Estimated time for outer_join_not_found (500:500): 124666 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 86 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_select_2_keys (1): 196 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(2): 22 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 85 wallclock secs ( 3.19 usr 0.48 sys + 0.00 cusr 0.00 csys = 3.67 CPU)
Time for delete_all (12): 2478 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 804 wallclock secs (47.08 usr 47.06 sys + 0.00 cusr 0.00 csys = 94.14 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 5365 wallclock secs ( 0.16 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.19 CPU)
Deleting rows from the table
Time for delete_big_many_keys (128): 93 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Deleting everything from table
Time for delete_all_many_keys (1): 94 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Estimated total time: 199360 wallclock secs (879.85 usr 202.59 sys + 0.00 cusr 0.00 csys = 1082.45 CPU)
Testing server 'PostgreSQL version ???' at 2001-06-03 16:37:16
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 10 wallclock secs ( 2.96 usr 0.39 sys + 0.00 cusr 0.00 csys = 3.35 CPU)
Test if the database has a query cache
Time for select_query_cache (10000): 2549 wallclock secs ( 3.25 usr 0.52 sys + 0.00 cusr 0.00 csys = 3.77 CPU)
Time for select_query_cache2 (10000): 2547 wallclock secs ( 3.04 usr 0.53 sys + 0.00 cusr 0.00 csys = 3.57 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.17 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.17 CPU)
Time for select_range (410:1057904): 465 wallclock secs (10.41 usr 0.63 sys + 0.00 cusr 0.00 csys = 11.04 CPU)
Note: Query took longer then time-limit: 600
Estimating end time based on:
11326 queries in 1618 loops of 10000 loops took 601 seconds
Estimated time for min_max_on_key (70000): 3714 wallclock secs (20.15 usr 3.46 sys + 0.00 cusr 0.00 csys = 23.61 CPU)
Note: Query took longer then time-limit: 600
Estimating end time based on:
17320 queries in 3464 loops of 10000 loops took 601 seconds
Estimated time for count_on_key (50000): 1734 wallclock secs (15.76 usr 1.99 sys + 0.00 cusr 0.00 csys = 17.75 CPU)
Time for count_group_on_key_parts (1000:100000): 331 wallclock secs ( 1.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 1.19 CPU)
Testing count(distinct) on the table
Time for count_distinct_key_prefix (1000:1000): 179 wallclock secs ( 0.28 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.35 CPU)
Time for count_distinct (1000:1000): 132 wallclock secs ( 0.31 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.37 CPU)
Time for count_distinct_2 (1000:1000): 213 wallclock secs ( 0.37 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.40 CPU)
Time for count_distinct_group_on_key (1000:6000): 488 wallclock secs ( 0.41 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.44 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 383 wallclock secs ( 1.10 usr 0.07 sys + 0.00 cusr 0.00 csys = 1.17 CPU)
Time for count_distinct_group (1000:100000): 384 wallclock secs ( 1.07 usr 0.08 sys + 0.00 cusr 0.00 csys = 1.15 CPU)
Time for count_distinct_big (100:1000000): 65 wallclock secs ( 7.88 usr 0.25 sys + 0.00 cusr 0.00 csys = 8.13 CPU)
Estimated total time: 13197 wallclock secs (68.30 usr 8.18 sys + 0.00 cusr 0.00 csys = 76.48 CPU)
Testing server 'PostgreSQL version ???' at 2001-06-03 19:06:27
Wisconsin benchmark test
Time for create_table (3): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 33 wallclock secs ( 9.09 usr 1.58 sys + 0.00 cusr 0.00 csys = 10.67 CPU)
Time to delete_big (1): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 16 wallclock secs ( 3.30 usr 0.65 sys + 0.00 cusr 0.00 csys = 3.95 CPU)
Total time: 52 wallclock secs (12.40 usr 2.23 sys + 0.00 cusr 0.00 csys = 14.63 CPU)
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
# $server Object for current server # $server Object for current server
# $limits Hash reference to limits for benchmark # $limits Hash reference to limits for benchmark
$benchmark_version="2.12"; $benchmark_version="2.13";
use Getopt::Long; use Getopt::Long;
require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n"; require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n";
......
#This file is automaticly generated by crash-me 1.54 #This file is automaticly generated by crash-me 1.56
NEG=yes # update of column= -column NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic Need_cast_for_null=no # Need to cast NULL for arithmetic
alter_add_col=yes # Alter table add column alter_add_col=yes # Alter table add column
alter_add_constraint=no # Alter table add constraint alter_add_constraint=yes # Alter table add constraint
alter_add_foreign_key=yes # Alter table add foreign key alter_add_foreign_key=no # Alter table add foreign key
alter_add_multi_col=no # Alter table add many columns alter_add_multi_col=no # Alter table add many columns
alter_add_primary_key=no # Alter table add primary key alter_add_primary_key=no # Alter table add primary key
alter_add_unique=no # Alter table add unique alter_add_unique=no # Alter table add unique
...@@ -29,21 +29,22 @@ columns_in_order_by=+64 # number of columns in order by ...@@ -29,21 +29,22 @@ columns_in_order_by=+64 # number of columns in order by
comment_#=no # # as comment comment_#=no # # as comment
comment_--=yes # -- as comment (ANSI) comment_--=yes # -- as comment (ANSI)
comment_/**/=yes # /* */ as comment comment_/**/=yes # /* */ as comment
comment_//=no # // as comment (ANSI) comment_//=no # // as comment
compute=no # Compute compute=no # Compute
connections=32 # Simultaneous connections (installation default) connections=32 # Simultaneous connections (installation default)
constraint_check=yes # Column constraints constraint_check=yes # Column constraints
constraint_check_table=yes # Table constraints constraint_check_table=yes # Table constraints
constraint_null=yes # NULL constraint (SyBase style) constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe crash_me_safe=yes # crash me safe
crash_me_version=1.54 # crash me version crash_me_version=1.56 # crash me version
create_default=yes # default value for column create_default=yes # default value for column
create_default_func=no # default value function for column create_default_func=yes # default value function for column
create_if_not_exists=no # create table if not exists create_if_not_exists=no # create table if not exists
create_index=yes # create index create_index=yes # create index
create_schema=no # Create SCHEMA create_schema=no # Create SCHEMA
create_table_select=with AS # create table from select create_table_select=with AS # create table from select
cross_join=yes # cross join (same as from a,b) cross_join=yes # cross join (same as from a,b)
date_as_string=yes # String functions on date columns
date_infinity=no # Supports 'infinity dates date_infinity=no # Supports 'infinity dates
date_last=yes # Supports 9999-12-31 dates date_last=yes # Supports 9999-12-31 dates
date_one=yes # Supports 0001-01-01 dates date_one=yes # Supports 0001-01-01 dates
...@@ -58,16 +59,16 @@ drop_requires_cascade=no # drop table require cascade/restrict ...@@ -58,16 +59,16 @@ drop_requires_cascade=no # drop table require cascade/restrict
drop_restrict=no # drop table with cascade/restrict drop_restrict=no # drop table with cascade/restrict
end_colon=yes # allows end ';' end_colon=yes # allows end ';'
except=yes # except except=yes # except
except_all=no # except all except_all=yes # except all
except_all_incompat=no # except all (incompatible lists) except_all_incompat=no # except all (incompatible lists)
except_incompat=no # except (incompatible lists) except_incompat=no # except (incompatible lists)
float_int_expr=yes # mixing of integer and float in expression float_int_expr=yes # mixing of integer and float in expression
foreign_key=yes # foreign keys foreign_key=yes # foreign keys
foreign_key_syntax=yes # foreign key syntax foreign_key_syntax=yes # foreign key syntax
full_outer_join=no # full outer join full_outer_join=yes # full outer join
func_extra_!=no # Function NOT as '!' in SELECT func_extra_!=no # Function NOT as '!' in SELECT
func_extra_%=yes # Function MOD as % func_extra_%=yes # Function MOD as %
func_extra_&=no # Function & (bitwise and) func_extra_&=yes # Function & (bitwise and)
func_extra_&&=no # Function AND as '&&' func_extra_&&=no # Function AND as '&&'
func_extra_<>=yes # Function <> in SELECT func_extra_<>=yes # Function <> in SELECT
func_extra_==yes # Function = func_extra_==yes # Function =
...@@ -79,12 +80,12 @@ func_extra_atn2=no # Function ATN2 ...@@ -79,12 +80,12 @@ func_extra_atn2=no # Function ATN2
func_extra_auto_num2string=no # Function automatic num->string convert func_extra_auto_num2string=no # Function automatic num->string convert
func_extra_auto_string2num=yes # Function automatic string->num convert func_extra_auto_string2num=yes # Function automatic string->num convert
func_extra_between=yes # Function BETWEEN in SELECT func_extra_between=yes # Function BETWEEN in SELECT
func_extra_binary_shifts=no # Function << and >> (bitwise shifts) func_extra_binary_shifts=yes # Function << and >> (bitwise shifts)
func_extra_bit_count=no # Function BIT_COUNT func_extra_bit_count=no # Function BIT_COUNT
func_extra_ceil=yes # Function CEIL func_extra_ceil=yes # Function CEIL
func_extra_charindex=no # Function CHARINDEX func_extra_charindex=no # Function CHARINDEX
func_extra_chr=no # Function CHR func_extra_chr=yes # Function CHR
func_extra_concat_as_+=no # Function concatenation with + func_extra_concat_as_+=error # Function concatenation with +
func_extra_concat_list=no # Function CONCAT(list) func_extra_concat_list=no # Function CONCAT(list)
func_extra_convert=no # Function CONVERT func_extra_convert=no # Function CONVERT
func_extra_cosh=no # Function COSH func_extra_cosh=no # Function COSH
...@@ -103,7 +104,7 @@ func_extra_getdate=no # Function GETDATE ...@@ -103,7 +104,7 @@ func_extra_getdate=no # Function GETDATE
func_extra_greatest=no # Function GREATEST func_extra_greatest=no # Function GREATEST
func_extra_if=no # Function IF func_extra_if=no # Function IF
func_extra_in_num=yes # Function IN on numbers in SELECT func_extra_in_num=yes # Function IN on numbers in SELECT
func_extra_in_str=no # Function IN on strings in SELECT func_extra_in_str=yes # Function IN on strings in SELECT
func_extra_initcap=yes # Function INITCAP func_extra_initcap=yes # Function INITCAP
func_extra_instr=no # Function LOCATE as INSTR func_extra_instr=no # Function LOCATE as INSTR
func_extra_instr_oracle=no # Function INSTR (Oracle syntax) func_extra_instr_oracle=no # Function INSTR (Oracle syntax)
...@@ -114,7 +115,7 @@ func_extra_last_insert_id=no # Function LAST_INSERT_ID ...@@ -114,7 +115,7 @@ func_extra_last_insert_id=no # Function LAST_INSERT_ID
func_extra_least=no # Function LEAST func_extra_least=no # Function LEAST
func_extra_lengthb=no # Function LENGTHB func_extra_lengthb=no # Function LENGTHB
func_extra_like=yes # Function LIKE in SELECT func_extra_like=yes # Function LIKE in SELECT
func_extra_like_escape=no # Function LIKE ESCAPE in SELECT func_extra_like_escape=yes # Function LIKE ESCAPE in SELECT
func_extra_ln=no # Function LN func_extra_ln=no # Function LN
func_extra_log(m_n)=yes # Function LOG(m,n) func_extra_log(m_n)=yes # Function LOG(m,n)
func_extra_logn=no # Function LOGN func_extra_logn=no # Function LOGN
...@@ -160,7 +161,7 @@ func_extra_unix_timestamp=no # Function UNIX_TIMESTAMP ...@@ -160,7 +161,7 @@ func_extra_unix_timestamp=no # Function UNIX_TIMESTAMP
func_extra_userenv=no # Function USERENV func_extra_userenv=no # Function USERENV
func_extra_version=yes # Function VERSION func_extra_version=yes # Function VERSION
func_extra_weekday=no # Function WEEKDAY func_extra_weekday=no # Function WEEKDAY
func_extra_|=no # Function | (bitwise or) func_extra_|=yes # Function | (bitwise or)
func_extra_||=no # Function OR as '||' func_extra_||=no # Function OR as '||'
func_extra_~*=yes # Function ~* (case insensitive compare) func_extra_~*=yes # Function ~* (case insensitive compare)
func_odbc_abs=yes # Function ABS func_odbc_abs=yes # Function ABS
...@@ -192,7 +193,7 @@ func_odbc_ifnull=no # Function IFNULL ...@@ -192,7 +193,7 @@ func_odbc_ifnull=no # Function IFNULL
func_odbc_insert=no # Function INSERT func_odbc_insert=no # Function INSERT
func_odbc_lcase=no # Function LCASE func_odbc_lcase=no # Function LCASE
func_odbc_left=no # Function LEFT func_odbc_left=no # Function LEFT
func_odbc_length=no # Function REAL LENGTH func_odbc_length=yes # Function REAL LENGTH
func_odbc_length_without_space=no # Function ODBC LENGTH func_odbc_length_without_space=no # Function ODBC LENGTH
func_odbc_locate_2=no # Function LOCATE(2 arg) func_odbc_locate_2=no # Function LOCATE(2 arg)
func_odbc_locate_3=no # Function LOCATE(3 arg) func_odbc_locate_3=no # Function LOCATE(3 arg)
...@@ -220,7 +221,7 @@ func_odbc_sin=yes # Function SIN ...@@ -220,7 +221,7 @@ func_odbc_sin=yes # Function SIN
func_odbc_soundex=no # Function SOUNDEX func_odbc_soundex=no # Function SOUNDEX
func_odbc_space=no # Function SPACE func_odbc_space=no # Function SPACE
func_odbc_sqrt=no # Function SQRT func_odbc_sqrt=no # Function SQRT
func_odbc_substring=no # Function ODBC SUBSTRING func_odbc_substring=yes # Function ODBC SUBSTRING
func_odbc_tan=yes # Function TAN func_odbc_tan=yes # Function TAN
func_odbc_timestampadd=no # Function TIMESTAMPADD func_odbc_timestampadd=no # Function TIMESTAMPADD
func_odbc_timestampdiff=no # Function TIMESTAMPDIFF func_odbc_timestampdiff=no # Function TIMESTAMPDIFF
...@@ -246,8 +247,8 @@ func_sql_localtime=no # Function LOCALTIME ...@@ -246,8 +247,8 @@ func_sql_localtime=no # Function LOCALTIME
func_sql_localtimestamp=no # Function LOCALTIMESTAMP func_sql_localtimestamp=no # Function LOCALTIMESTAMP
func_sql_lower=yes # Function LOWER func_sql_lower=yes # Function LOWER
func_sql_nullif_num=yes # Function NULLIF with numbers func_sql_nullif_num=yes # Function NULLIF with numbers
func_sql_nullif_string=no # Function NULLIF with strings func_sql_nullif_string=yes # Function NULLIF with strings
func_sql_octet_length=no # Function OCTET_LENGTH func_sql_octet_length=yes # Function OCTET_LENGTH
func_sql_position=yes # Function POSITION func_sql_position=yes # Function POSITION
func_sql_searched_case=yes # Function searched CASE func_sql_searched_case=yes # Function searched CASE
func_sql_session_user=yes # Function SESSION_USER func_sql_session_user=yes # Function SESSION_USER
...@@ -264,7 +265,7 @@ func_where_eq_some=yes # Function = SOME ...@@ -264,7 +265,7 @@ func_where_eq_some=yes # Function = SOME
func_where_exists=yes # Function EXISTS func_where_exists=yes # Function EXISTS
func_where_in_num=yes # Function IN on numbers func_where_in_num=yes # Function IN on numbers
func_where_like=yes # Function LIKE func_where_like=yes # Function LIKE
func_where_like_escape=no # Function LIKE ESCAPE func_where_like_escape=yes # Function LIKE ESCAPE
func_where_match=no # Function MATCH func_where_match=no # Function MATCH
func_where_match_unique=no # Function MATCH UNIQUE func_where_match_unique=no # Function MATCH UNIQUE
func_where_matches=no # Function MATCHES func_where_matches=no # Function MATCHES
...@@ -283,8 +284,8 @@ group_func_extra_bit_and=no # Group function BIT_AND ...@@ -283,8 +284,8 @@ group_func_extra_bit_and=no # Group function BIT_AND
group_func_extra_bit_or=no # Group function BIT_OR group_func_extra_bit_or=no # Group function BIT_OR
group_func_extra_count_distinct_list=no # Group function COUNT(DISTINCT expr,expr,...) group_func_extra_count_distinct_list=no # Group function COUNT(DISTINCT expr,expr,...)
group_func_extra_std=no # Group function STD group_func_extra_std=no # Group function STD
group_func_extra_stddev=no # Group function STDDEV group_func_extra_stddev=yes # Group function STDDEV
group_func_extra_variance=no # Group function VARIANCE group_func_extra_variance=yes # Group function VARIANCE
group_func_sql_any=no # Group function ANY group_func_sql_any=no # Group function ANY
group_func_sql_avg=yes # Group function AVG group_func_sql_avg=yes # Group function AVG
group_func_sql_count_*=yes # Group function COUNT (*) group_func_sql_count_*=yes # Group function COUNT (*)
...@@ -315,37 +316,37 @@ insert_multi_value=no # INSERT with Value lists ...@@ -315,37 +316,37 @@ insert_multi_value=no # INSERT with Value lists
insert_select=yes # insert INTO ... SELECT ... insert_select=yes # insert INTO ... SELECT ...
insert_with_set=no # INSERT with set syntax insert_with_set=no # INSERT with set syntax
intersect=yes # intersect intersect=yes # intersect
intersect_all=no # intersect all intersect_all=yes # intersect all
intersect_all_incompat=no # intersect all (incompatible lists) intersect_all_incompat=no # intersect all (incompatible lists)
intersect_incompat=no # intersect (incompatible lists) intersect_incompat=no # intersect (incompatible lists)
join_tables=+64 # tables in join join_tables=+64 # tables in join
left_outer_join=no # left outer join left_outer_join=yes # left outer join
left_outer_join_using=no # left outer join using left_outer_join_using=yes # left outer join using
like_with_column=yes # column LIKE column like_with_column=yes # column LIKE column
like_with_number=yes # LIKE on numbers like_with_number=yes # LIKE on numbers
lock_tables=yes # lock table lock_tables=yes # lock table
logical_value=1 # Value of logical operation (1=1) logical_value=1 # Value of logical operation (1=1)
max_big_expressions=10 # big expressions max_big_expressions=10 # big expressions
max_char_size=8104 # max char() size max_char_size=+8000000 # max char() size
max_column_name=+512 # column name length max_column_name=+512 # column name length
max_columns=1600 # Columns in table max_columns=1600 # Columns in table
max_conditions=19994 # OR and AND in WHERE max_conditions=19994 # OR and AND in WHERE
max_expressions=9999 # simple expressions max_expressions=9999 # simple expressions
max_index=+64 # max index max_index=+64 # max index
max_index_length=2704 # index length max_index_length=+8192 # index length
max_index_name=+512 # index name length max_index_name=+512 # index name length
max_index_part_length=2704 # max index part length max_index_part_length=235328 # max index part length
max_index_parts=16 # index parts max_index_parts=16 # index parts
max_index_varchar_part_length=2704 # index varchar part length max_index_varchar_part_length=235328 # index varchar part length
max_row_length=7949 # max table row length (without blobs) max_row_length=64519 # max table row length (without blobs)
max_row_length_with_null=7949 # table row length with nulls (without blobs) max_row_length_with_null=64519 # table row length with nulls (without blobs)
max_select_alias_name=+512 # select alias name length max_select_alias_name=+512 # select alias name length
max_stack_expression=+2000 # stacked expressions max_stack_expression=+2000 # stacked expressions
max_table_alias_name=+512 # table alias name length max_table_alias_name=+512 # table alias name length
max_table_name=+512 # table name length max_table_name=+512 # table name length
max_text_size=8104 # max text or blob size max_text_size=+8000000 # max text or blob size
max_unique_index=+64 # unique indexes max_unique_index=+64 # unique indexes
max_varchar_size=8104 # max varchar() size max_varchar_size=+8000000 # max varchar() size
minus=no # minus minus=no # minus
minus_incompat=no # minus (incompatible lists) minus_incompat=no # minus (incompatible lists)
minus_neg=no # Calculate 1--1 minus_neg=no # Calculate 1--1
...@@ -356,7 +357,7 @@ multi_table_delete=no # DELETE FROM table1,table2... ...@@ -356,7 +357,7 @@ multi_table_delete=no # DELETE FROM table1,table2...
multi_table_update=no # Update with many tables multi_table_update=no # Update with many tables
natural_join=yes # natural join natural_join=yes # natural join
natural_join_incompat=yes # natural join (incompatible lists) natural_join_incompat=yes # natural join (incompatible lists)
natural_left_outer_join=no # natural left outer join natural_left_outer_join=yes # natural left outer join
no_primary_key=yes # Tables without primary key no_primary_key=yes # Tables without primary key
null_concat_expr=yes # Is 'a' || NULL = NULL null_concat_expr=yes # Is 'a' || NULL = NULL
null_in_index=yes # null in index null_in_index=yes # null in index
...@@ -364,7 +365,7 @@ null_in_unique=yes # null in unique index ...@@ -364,7 +365,7 @@ null_in_unique=yes # null in unique index
null_num_expr=yes # Is 1+NULL = NULL null_num_expr=yes # Is 1+NULL = NULL
nulls_in_unique=yes # null combination in unique index nulls_in_unique=yes # null combination in unique index
odbc_left_outer_join=no # left outer join odbc style odbc_left_outer_join=no # left outer join odbc style
operating_system=Linux 2.2.14-5.0 i686 # crash-me tested on operating_system=Linux 2.4.0-64GB-SMP i686 # crash-me tested on
order_by=yes # Order by order_by=yes # Order by
order_by_alias=yes # Order by alias order_by_alias=yes # Order by alias
order_by_function=yes # Order by function order_by_function=yes # Order by function
...@@ -386,7 +387,7 @@ remember_end_space=no # Remembers end space in char() ...@@ -386,7 +387,7 @@ remember_end_space=no # Remembers end space in char()
remember_end_space_varchar=yes # Remembers end space in varchar() remember_end_space_varchar=yes # Remembers end space in varchar()
rename_table=no # rename table rename_table=no # rename table
repeat_string_size=+8000000 # return string size from function repeat_string_size=+8000000 # return string size from function
right_outer_join=no # right outer join right_outer_join=yes # right outer join
rowid=oid # Type for row id rowid=oid # Type for row id
select_constants=yes # Select constants select_constants=yes # Select constants
select_limit=with LIMIT # LIMIT number of rows select_limit=with LIMIT # LIMIT number of rows
...@@ -394,7 +395,7 @@ select_limit2=yes # SELECT with LIMIT #,# ...@@ -394,7 +395,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=16777207 # constant string size in SELECT select_string_size=16777207 # constant string size in SELECT
select_table_update=yes # Update with sub select select_table_update=yes # Update with sub select
select_without_from=yes # SELECT without FROM select_without_from=yes # SELECT without FROM
server_version=PostgreSQL version 7.0.2 # server version server_version=PostgreSQL version 7.1.1 # server version
simple_joins=yes # ANSI SQL simple joins simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values storage_of_float=round # Storage of float values
subqueries=yes # subqueries subqueries=yes # subqueries
...@@ -466,7 +467,7 @@ type_extra_timespan=yes # Type timespan ...@@ -466,7 +467,7 @@ type_extra_timespan=yes # Type timespan
type_extra_uint=no # Type uint type_extra_uint=no # Type uint
type_extra_varchar2(1_arg)=no # Type varchar2(1 arg) type_extra_varchar2(1_arg)=no # Type varchar2(1 arg)
type_extra_year=no # Type year type_extra_year=no # Type year
type_odbc_bigint=no # Type bigint type_odbc_bigint=yes # Type bigint
type_odbc_binary(1_arg)=no # Type binary(1 arg) type_odbc_binary(1_arg)=no # Type binary(1 arg)
type_odbc_datetime=yes # Type datetime type_odbc_datetime=yes # Type datetime
type_odbc_tinyint=no # Type tinyint type_odbc_tinyint=no # Type tinyint
...@@ -519,4 +520,4 @@ union_incompat=yes # union (incompatible lists) ...@@ -519,4 +520,4 @@ union_incompat=yes # union (incompatible lists)
unique_in_create=yes # unique in create table unique_in_create=yes # unique in create table
unique_null_in_create=yes # unique null in create unique_null_in_create=yes # unique null in create
views=yes # views views=yes # views
where_string_size=16777182 # constant string size in where where_string_size=16777181 # constant string size in where
...@@ -122,53 +122,49 @@ sub new ...@@ -122,53 +122,49 @@ sub new
$self->{'vacuum'} = 1; # When using with --fast $self->{'vacuum'} = 1; # When using with --fast
$self->{'drop_attr'} = ""; $self->{'drop_attr'} = "";
$limits{'max_conditions'} = 9999; # (Actually not a limit) $limits{'NEG'} = 1; # Supports -id
$limits{'max_columns'} = 2000; # Max number of columns in table
# Windows can't handle that many files in one directory
$limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
$limits{'max_text_size'} = 65000; # Max size with default buffers.
$limits{'query_size'} = 1000000; # Max size with default buffers.
$limits{'max_index'} = 16; # Max number of keys
$limits{'max_index_parts'} = 16; # Max segments/key
$limits{'max_column_name'} = 64; # max table and column name
$limits{'join_optimizer'} = 1; # Can optimize FROM tables
$limits{'load_data_infile'} = 1; # Has load data infile
$limits{'lock_tables'} = 1; # Has lock tables
$limits{'functions'} = 1; # Has simple functions (+/-)
$limits{'group_functions'} = 1; # Have group functions
$limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
$limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'select_without_from'}= 1; # Can do 'select 1';
$limits{'multi_drop'} = 1; # Drop table can take many tables
$limits{'subqueries'} = 0; # Doesn't support sub-queries.
$limits{'left_outer_join'} = 1; # Supports left outer joins
$limits{'table_wildcard'} = 1; # Has SELECT table_name.*
$limits{'having_with_alias'} = 1; # Can use aliases in HAVING
$limits{'having_with_group'} = 1; # Can use group functions in HAVING
$limits{'like_with_column'} = 1; # Can use column1 LIKE column2
$limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
$limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
$limits{'alter_table'} = 1; # Have ALTER TABLE
$limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int; $limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int;
$limits{'alter_table'} = 1; # Have ALTER TABLE
$limits{'alter_table_dropcol'}= 1; # Have ALTER TABLE DROP column $limits{'alter_table_dropcol'}= 1; # Have ALTER TABLE DROP column
$limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4) $limits{'column_alias'} = 1; # Alias for fields in select statement.
$limits{'group_func_extra_std'} = 1; # Have group function std().
$limits{'func_odbc_mod'} = 1; # Have function mod.
$limits{'func_extra_%'} = 1; # Has % as alias for mod() $limits{'func_extra_%'} = 1; # Has % as alias for mod()
$limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
$limits{'func_extra_if'} = 1; # Have function if. $limits{'func_extra_if'} = 1; # Have function if.
$limits{'column_alias'} = 1; # Alias for fields in select statement.
$limits{'NEG'} = 1; # Supports -id
$limits{'func_extra_in_num'} = 1; # Has function in $limits{'func_extra_in_num'} = 1; # Has function in
$limits{'limit'} = 1; # supports the limit attribute $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
$limits{'unique_index'} = 1; # Unique index works or not $limits{'func_odbc_mod'} = 1; # Have function mod.
$limits{'functions'} = 1; # Has simple functions (+/-)
$limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
$limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'group_func_extra_std'} = 1; # Have group function std().
$limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
$limits{'group_functions'} = 1; # Have group functions
$limits{'having_with_alias'} = 1; # Can use aliases in HAVING
$limits{'having_with_group'} = 1; # Can use group functions in HAVING
$limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
$limits{'insert_select'} = 1; $limits{'insert_select'} = 1;
$limits{'working_blobs'} = 1; # If big varchar/blobs works $limits{'join_optimizer'} = 1; # Can optimize FROM tables
$limits{'left_outer_join'} = 1; # Supports left outer joins
$limits{'like_with_column'} = 1; # Can use column1 LIKE column2
$limits{'limit'} = 1; # supports the limit attribute
$limits{'load_data_infile'} = 1; # Has load data infile
$limits{'lock_tables'} = 1; # Has lock tables
$limits{'max_column_name'} = 64; # max table and column name
$limits{'max_columns'} = 2000; # Max number of columns in table
$limits{'max_conditions'} = 9999; # (Actually not a limit)
$limits{'max_index'} = 16; # Max number of keys
$limits{'max_index_parts'} = 16; # Max segments/key
$limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
$limits{'max_text_size'} = 1000000; # Good enough for tests
$limits{'multi_drop'} = 1; # Drop table can take many tables
$limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
$limits{'order_by_unused'} = 1; $limits{'order_by_unused'} = 1;
$limits{'query_size'} = 1000000; # Max size with default buffers.
$limits{'select_without_from'}= 1; # Can do 'select 1';
$limits{'subqueries'} = 0; # Doesn't support sub-queries.
$limits{'table_wildcard'} = 1; # Has SELECT table_name.*
$limits{'unique_index'} = 1; # Unique index works or not
$limits{'working_all_fields'} = 1; $limits{'working_all_fields'} = 1;
$limits{'working_blobs'} = 1; # If big varchar/blobs works
$smds{'time'} = 1;
$smds{'q1'} = 'b'; # with time not supp by mysql ('')
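
For context, a minimal sketch of how a benchmark script typically consults these capability flags before issuing a statement; the $dbh handle, the bench1 table and its columns are hypothetical and not part of server-cfg.sh:

# Fall back to one-row INSERTs when the server lacks multi-value INSERT syntax.
if ($limits{'insert_multi_value'})
{
  $dbh->do("insert into bench1 (id,val) values (1,'a'),(2,'b')") or die $DBI::errstr;
}
else
{
  $dbh->do("insert into bench1 (id,val) values (1,'a')") or die $DBI::errstr;
  $dbh->do("insert into bench1 (id,val) values (2,'b')") or die $DBI::errstr;
}
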
...@@ -569,12 +565,12 @@ sub new
$self->{'drop_attr'} = "";
$self->{"vacuum"} = 1;
$limits{'join_optimizer'} = 1; # Can optimize FROM tables
$limits{'load_data_infile'} = 0; # Is this true ?
$limits{'load_data_infile'} = 0;
$limits{'NEG'} = 1; # Can't handle -id
$limits{'NEG'} = 1;
$limits{'alter_table'} = 1; # alter ??
$limits{'alter_add_multi_col'}= 0; # alter_add_multi_col ?
$limits{'alter_table_dropcol'}= 0; # alter_drop_col ?
$limits{'alter_table'} = 1;
$limits{'alter_table_dropcol'}= 0;
$limits{'column_alias'} = 1;
$limits{'func_extra_%'} = 1;
$limits{'func_extra_if'} = 0;
...@@ -583,33 +579,33 @@ sub new
$limits{'func_odbc_mod'} = 1; # Has %
$limits{'functions'} = 1;
$limits{'group_by_position'} = 1;
$limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'group_func_extra_std'} = 0;
$limits{'group_func_sql_min_str'}= 1; # Can execute MIN() and MAX() on strings
$limits{'group_functions'} = 1;
$limits{'group_distinct_functions'}= 1; # Have count(distinct)
$limits{'having_with_alias'} = 0;
$limits{'having_with_group'} = 1;
$limits{'left_outer_join'} = 0;
$limits{'insert_select'} = 1;
$limits{'left_outer_join'} = 1;
$limits{'like_with_column'} = 1;
$limits{'lock_tables'} = 0; # in ATIS gives this a problem
$limits{'max_column_name'} = 128;
$limits{'max_columns'} = 1000; # 500 crashes pg 6.3
$limits{'max_conditions'} = 9999; # This makes Pg real slow
$limits{'max_index'} = 64; # Big enough
$limits{'max_index_parts'} = 16;
$limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
$limits{'max_text_size'} = 65000; # Good enough for test
$limits{'multi_drop'} = 1;
$limits{'order_by_position'} = 1;
$limits{'order_by_unused'} = 1;
$limits{'query_size'} = 16777216;
$limits{'select_without_from'}= 1;
$limits{'subqueries'} = 1;
$limits{'table_wildcard'} = 1;
$limits{'max_column_name'} = 32; # Is this true
$limits{'max_columns'} = 1000; # 500 crashes pg 6.3
$limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
$limits{'max_conditions'} = 30; # This makes Pg real slow
$limits{'max_index'} = 64; # Is this true ?
$limits{'max_index_parts'} = 16; # Is this true ?
$limits{'max_text_size'} = 7000; # 8000 crashes pg 6.3
$limits{'query_size'} = 16777216;
$limits{'unique_index'} = 1; # Unique index works or not
$limits{'insert_select'} = 1;
$limits{'working_blobs'} = 1; # If big varchar/blobs works
$limits{'order_by_unused'} = 1;
$limits{'working_all_fields'} = 1;
$limits{'working_blobs'} = 1; # If big varchar/blobs works
# the different cases per query ...
$smds{'q1'} = 'b'; # with time
...@@ -640,7 +636,7 @@ sub new
sub version
{
my ($version,$dir);
foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/my/local/pgsql/")
foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/usr/local/pg/data")
{
if ($dir && -e "$dir/PG_VERSION")
{
...
...@@ -266,7 +266,7 @@ for ($i=0 ; $i < $opt_loop_count ; $i++)
}
$end_time=new Benchmark;
print "Time to select_big ($opt_loop_count): " .
print "Time to select_big_str ($opt_loop_count): " .
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
$sth = $dbh->do("drop table bench1" . $server->{'drop_attr'})
...
...@@ -19,11 +19,7 @@
#pragma implementation // gcc: Class implementation
#endif
#include <string.h>
#include "mysql_priv.h" #include "mysql_priv.h"
#include "my_pthread.h"
#ifdef HAVE_GEMINI_DB
#include "ha_gemini.h"
#include "dbconfig.h"
...@@ -33,6 +29,7 @@
#include <m_ctype.h>
#include <myisampack.h>
#include <m_string.h>
#include <assert.h>
#include <hash.h>
#include <stdarg.h>
...
...@@ -2142,6 +2142,7 @@ ha_innobase::external_lock(
prebuilt->in_update_remember_pos = TRUE;
if (lock_type == F_WRLCK) {
/* If this is a SELECT, then it is in UPDATE TABLE ...
or SELECT ... FOR UPDATE */
prebuilt->select_lock_type = LOCK_X;
...@@ -2153,13 +2154,27 @@ ha_innobase::external_lock(
}
trx->n_mysql_tables_in_use++;
if (prebuilt->select_lock_type != LOCK_NONE) {
trx->mysql_n_tables_locked++;
}
} else {
trx->n_mysql_tables_in_use--;
if (trx->n_mysql_tables_in_use == 0 &&
if (trx->n_mysql_tables_in_use == 0) {
!(thd->options
& (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
trx->mysql_n_tables_locked = 0;
innobase_commit(thd, trx);
if (trx->has_search_latch) {
trx_search_latch_release_if_reserved(trx);
}
if (!(thd->options
& (OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN))) {
innobase_commit(thd, trx);
}
}
}
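
A minimal DBI sketch of the case the new branch above handles; the table name t1 is hypothetical and $dbh is assumed to be an already-connected handle. With autocommit disabled, releasing the last table no longer commits the InnoDB transaction; the commit is deferred to the explicit commit:

$dbh->{AutoCommit} = 0;                                 # autocommit off (OPTION_NOT_AUTO_COMMIT on the server)
my $rows = $dbh->selectall_arrayref(
    "select * from t1 where id = 1 for update");        # SELECT ... FOR UPDATE => select_lock_type = LOCK_X
$dbh->do("update t1 set val = val + 1 where id = 1");
$dbh->commit;                                           # innobase_commit() runs here, not when the tables are released
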
...@@ -2690,6 +2705,39 @@ ha_innobase::info(
DBUG_VOID_RETURN;
}
/***********************************************************************
Tries to check that an InnoDB table is not corrupted. If corruption is
noticed, prints to stderr information about it. In case of corruption
may also assert a failure and crash the server. */
int
ha_innobase::check(
/*===============*/
/* out: HA_ADMIN_CORRUPT or
HA_ADMIN_OK */
THD* thd, /* in: user thread handle */
HA_CHECK_OPT* check_opt) /* in: check options, currently
ignored */
{
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
ulint ret;
if (prebuilt->mysql_template == NULL) {
/* Build the template; we will use a dummy template
in index scans done in checking */
build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
}
ret = row_check_table_for_mysql(prebuilt);
if (ret == DB_SUCCESS) {
return(HA_ADMIN_OK);
}
return(HA_ADMIN_CORRUPT);
}
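
The new handler method is reached through the CHECK TABLE statement. A minimal DBI sketch of exercising it from a client; the table name t1 is hypothetical and $dbh is assumed to be an open connection:

# HA_ADMIN_OK normally surfaces as Msg_text 'OK'; HA_ADMIN_CORRUPT as a corruption message.
my $check = $dbh->selectrow_hashref("CHECK TABLE t1");
print "$check->{Table}: $check->{Msg_text}\n";
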
/*****************************************************************
Adds information about free space in the InnoDB tablespace to a
table comment which is printed out when a user calls SHOW TABLE STATUS. */
...
...@@ -142,7 +142,7 @@ class ha_innobase: public handler
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
int rename_table(const char* from, const char* to);
int check(THD* thd, HA_CHECK_OPT* check_opt);
char* update_table_comment(const char* comment);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
...
...@@ -95,17 +95,16 @@ int deny_severity = LOG_WARNING;
#include <sys/mman.h>
#endif
#ifdef _AIX41
int initgroups(const char *,unsigned int);
#endif
#if defined(__FreeBSD__) && defined(HAVE_IEEEFP_H)
#include <ieeefp.h>
#ifdef HAVE_FP_EXCEPT // Fix type conflict
typedef fp_except fp_except_t;
#endif
#ifdef _AIX41
extern "C" int initgroups(const char *,int);
#endif
/* We can't handle floating point exceptions with threads, so disable
this on freebsd
*/
...
...@@ -88,6 +88,7 @@ for ($i=0 ; $i < $opt_threads ; $i ++)
{
test_select() if (($pid=fork()) == 0); $work{$pid}="select_key";
}
test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";
test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
test_update() if (($pid=fork()) == 0); $work{$pid}="update";
test_flush() if (($pid=fork()) == 0); $work{$pid}= "flush";
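
The fork idiom used in these lines is terse; expanded (for one of the test subs) it reads like this — the child runs the test and never returns because the sub ends in exit(0), while the parent records the child's role keyed by its pid:

my %work;
my $pid;
if (($pid = fork()) == 0)
{
  test_select_count();            # child: loops until test_if_abort(), then exit(0)
}
$work{$pid} = "select_count";     # parent: fork() returned the child's pid
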
...@@ -213,6 +214,35 @@ sub test_select
exit(0);
}
#
# Do big select count(distinct..) over the table
#
sub test_select_count
{
my ($dbh, $i, $j, $count, $loop);
$dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
$opt_user, $opt_password,
{ PrintError => 0}) || die $DBI::errstr;
$count=0;
$i=0;
while (!test_if_abort($dbh))
{
for ($j=0 ; $j < $numtables ; $j++)
{
my ($table)= $testtables[$j]->[0];
simple_query($dbh, "select count(distinct marker),count(distinct id),count(distinct info) from $table");
$count++;
}
sleep(20); # This query is quite slow
}
$dbh->disconnect; $dbh=0;
print "Test_select: Executed $count select count(distinct) queries\n";
exit(0);
}
#
# Delete 1-5 rows from the first 2 tables.
# Test ends when the number of rows for table 3 didn't change during
...