Commit f48ed453 authored by serg@serg.mysql.com

merged

parents 568d72f0 6991ee78
......@@ -44,7 +44,7 @@ pentium_cflags="-mpentiumpro"
sparc_cflags=""
fast_cflags="-O6 -fno-omit-frame-pointer"
reckless_cflags="-O6 -fomit-frame-pointer"
reckless_cflags="-O6 -fomit-frame-pointer -ffixed-ebp"
debug_cflags="-DEXTRA_DEBUG -DFORCE_INIT_OF_VARS -DSAFEMALLOC -DSAFE_MUTEX -O2"
base_cxxflags="-felide-constructors -fno-exceptions -fno-rtti"
......
......@@ -3,7 +3,7 @@
path=`dirname $0`
. "$path/SETUP.sh"
extra_flags="$pentium_cflags $fast_cflags"
extra_flags="$pentium_cflags $reckless_cflags"
extra_configs="$pentium_configs"
strip=yes
......
......@@ -8,6 +8,6 @@ c_warnings="$c_warnings $debug_extra_warnings"
cxx_warnings="$cxx_warnings $debug_extra_warnings"
extra_configs="$pentium_configs $debug_configs"
extra_configs="$extra_configs --with-berkeley-db --with-innobase"
extra_configs="$extra_configs --with-berkeley-db --with-innodb"
. "$path/FINISH.sh"
......@@ -7,6 +7,6 @@ extra_flags="$pentium_cflags $fast_cflags"
extra_configs="$pentium_configs"
strip=yes
extra_configs="$extra_configs --with-innobase --with-berkeley-db"
extra_configs="$extra_configs --with-innodb --with-berkeley-db"
. "$path/FINISH.sh"
heikki@donna.mysql.fi
monty@donna.mysql.fi
monty@work.mysql.com
paul@central.snake.net
sasha@mysql.sashanet.com
serg@serg.mysql.com
......@@ -56,7 +56,7 @@ aclocal; autoheader; aclocal; automake; autoconf
(cd bdb/dist && sh s_all)
(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)
# A normal user starts here. We must use mit-threads, bdb and innobase.
# A normal user starts here. We must use mit-threads, bdb and innodb.
# Otherwise they do not end up in the distribution.
./configure \
--with-unix-socket-path=/var/tmp/mysql.sock \
......@@ -64,7 +64,7 @@ aclocal; autoheader; aclocal; automake; autoconf
--with-mit-threads=yes $EXTRA_CONFIG \
--enable-thread-safe-client \
--with-berkeley-db \
--with-innobase
--with-innodb
gmake -j 2
......
......@@ -121,6 +121,7 @@ distribution for that version.
* Tutorial:: @strong{MySQL} Tutorial
* Server:: @strong{MySQL} Server
* Replication:: Replication
* Fulltext Search:: Fulltext Search
* Performance:: Getting maximum performance from @strong{MySQL}
* MySQL Benchmarks:: The @strong{MySQL} benchmark suite
* Tools:: @strong{MySQL} Utilities
......@@ -497,7 +498,7 @@ MySQL Table Types
* HEAP:: HEAP tables
* BDB:: BDB or Berkeley_db tables
* GEMINI:: GEMINI tables
* INNOBASE:: INNOBASE tables
* INNODB:: INNODB tables
MyISAM Tables
......@@ -527,12 +528,12 @@ GEMINI Tables
* GEMINI features::
* GEMINI TODO::
INNOBASE Tables
INNODB Tables
* INNOBASE overview::
* INNOBASE start:: INNOBASE startup options
* Using INNOBASE tables:: Using INNOBASE tables
* INNOBASE restrictions:: Some restrictions on @code{INNOBASE} tables:
* INNODB overview::
* INNODB start:: INNODB startup options
* Using INNODB tables:: Using INNODB tables
* INNODB restrictions:: Some restrictions on @code{INNODB} tables:
MySQL Tutorial
......@@ -602,6 +603,13 @@ Replication in MySQL
* Replication FAQ:: Frequently Asked Questions about replication
* Replication Problems:: Troubleshooting Replication.
MySQL Full-text Search
* Fulltext Search::
* Fulltext Fine-tuning::
* Fulltext Features to Appear in MySQL 4.0::
* Fulltext TODO::
Getting Maximum Performance from MySQL
* Optimize Basics:: Optimization overview
......@@ -884,15 +892,8 @@ How MySQL Compares to @code{mSQL}
MySQL Internals
* MySQL threads:: MySQL threads
* MySQL full-text search:: MySQL full-text search
* MySQL test suite:: MySQL test suite
MySQL Full-text Search
* Fulltext Fine-tuning::
* Fulltext features to appear in MySQL 4.0::
* Fulltext TODO::
Credits
* Developers::
......@@ -2209,7 +2210,7 @@ The Berkeley DB code is very stable, but we are still improving the interface
between @strong{MySQL} and BDB tables, so it will take some time before this
is as tested as the other table types.
@item Innobase Tables -- Alpha
@item Innodb Tables -- Alpha
This is a very recent addition to @code{MySQL} and is not yet well tested.
@item Automatic recovery of MyISAM tables - Beta
......@@ -4129,12 +4130,12 @@ phone back within 48 hours to discuss @code{MySQL} related issues.
@end itemize
@cindex support, BDB Tables
@cindex support, INNOBASE Tables
@cindex support, INNODB Tables
@cindex support, GEMINI Tables
@node Table handler support, , Telephone support, Support
@subsection Support for other table handlers
To get support for @code{BDB} tables, @code{INNOBASE} tables or
To get support for @code{BDB} tables, @code{INNODB} tables or
@code{GEMINI} tables you have to pay an additional 30% on the standard
support price for each of the table handlers you would like to have
support for.
......@@ -5072,7 +5073,7 @@ extra options that you may want to use:
@itemize @bullet
@item --with-berkeley-db
@item --with-innobase
@item --with-innodb
@item --with-raid
@item --with-libwrap
@item --with-named-z-lib (This is done for some of the binaries)
......@@ -7432,6 +7433,17 @@ If you see a dead @code{mysqld} daemon process with @code{ps}, this usually
means that you have found a bug in @strong{MySQL} or you have a corrupted
table. @xref{Crashing}.
To get a core dump on Linux if mysqld dies with a SIGSEGV
signal, you can start mysqld with the @code{--core-file} option. Note
that you also probably need to raise the @code{core file size} by adding
@code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld}
with @code{--core-file-sizes=1000000}. @xref{safe_mysqld}.
@c the stuff below is really out of date - hardly anybody uses it anymore
If you are using LinuxThreads and @code{mysqladmin shutdown} doesn't work,
you must upgrade to LinuxThreads Version 0.7.1 or newer.
To get a core dump on Linux if mysqld dies with a SIGSEGV signal, you can
start mysqld with the @code{--core-file} option. Note that you also probably
need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to
......@@ -9255,7 +9267,7 @@ above can always mail them to the developer's mailing list at
@email{internals@@lists.mysql.com}.
RPM distributions prior to @strong{MySQL} Version 3.22 are user-contributed.
Beginning with Version 3.22, some RPMs are generated by us at
Beginning with Version 3.22, the RPMs are generated by us at
@strong{MySQL AB}.
If you want to compile a debug version of @strong{MySQL}, you should add
......@@ -9807,8 +9819,8 @@ yourself with the different BDB specific startup options. @xref{BDB start}.
If you are using Gemini tables, refer to the Gemini-specific startup options.
@xref{GEMINI start}.
If you are using Innobase tables, refer to the Innobase-specific startup
options. @xref{INNOBASE start}.
If you are using Innodb tables, refer to the Innodb-specific startup
options. @xref{INNODB start}.
@node Automatic start, Command-line options, Starting server, Post-installation
@subsection Starting and Stopping MySQL Automatically
......@@ -11220,7 +11232,7 @@ issue. For those of our users who are concerned with or have wondered
about transactions vis-a-vis @strong{MySQL}, there is a ``@strong{MySQL}
way'' as we have outlined above. For those for whom safety is more
important than speed, we recommend using the @code{BDB},
@code{GEMINI} or @code{INNOBASE} tables for all their critical
@code{GEMINI} or @code{INNODB} tables for all their critical
data. @xref{Table types}.
One final note: We are currently working on a safe replication schema
......@@ -11448,11 +11460,11 @@ Entry level SQL92. ODBC levels 0-2.
@cindex updating, tables
@cindex @code{BDB} tables
@cindex @code{GEMINI} tables
@cindex @code{INNOBASE} tables
@cindex @code{INNODB} tables
The following mostly applies only for @code{ISAM}, @code{MyISAM}, and
@code{HEAP} tables. If you only use transaction-safe tables (@code{BDB},
@code{GEMINI} or @code{INNOBASE} tables) in an update, you can do
@code{GEMINI} or @code{INNODB} tables) in an update, you can do
@code{COMMIT} and @code{ROLLBACK} also with @strong{MySQL}.
@xref{COMMIT}.
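A minimal sketch of what this looks like in practice, assuming a
transaction-safe table named @code{accounts} (the table and column names
are illustrative only, not taken from this manual):
@example
mysql> SET AUTOCOMMIT=0;                                  # 'accounts' is an example table
mysql> UPDATE accounts SET balance=balance-10 WHERE id=1;
mysql> ROLLBACK;                                          # the update is undone
mysql> UPDATE accounts SET balance=balance-10 WHERE id=1;
mysql> COMMIT;                                            # the update is made permanent
@end example
For tables that are not transaction-safe, changes are stored immediately
and @code{ROLLBACK} cannot undo them.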
......@@ -15550,7 +15562,7 @@ In @strong{MySQL} Version 3.23.23 or later, you can also create special
@code{MyISAM} table type supports @code{FULLTEXT} indexes. They can be
created only from @code{VARCHAR} and @code{TEXT} columns.
Indexing always happens over the entire column and partial indexing is not
supported. See @ref{MySQL full-text search} for details.
supported. See @ref{Fulltext Search} for details.
@cindex multi-column indexes
@cindex indexes, multi-column
......@@ -16293,7 +16305,7 @@ For @code{MATCH ... AGAINST()} to work, a @strong{FULLTEXT} index
must be created first. @xref{CREATE TABLE, , @code{CREATE TABLE}}.
@code{MATCH ... AGAINST()} is available in @strong{MySQL} Version
3.23.23 or later. For details and usage examples
@pxref{MySQL full-text search}.
@pxref{Fulltext Search}.
@end table
@findex casts
......@@ -18469,7 +18481,7 @@ reference_option:
RESTRICT | CASCADE | SET NULL | NO ACTION | SET DEFAULT
table_options:
TYPE = @{BDB | HEAP | ISAM | INNOBASE | MERGE | MYISAM @}
TYPE = @{BDB | HEAP | ISAM | INNODB | MERGE | MYISAM @}
or AUTO_INCREMENT = #
or AVG_ROW_LENGTH = #
or CHECKSUM = @{0 | 1@}
......@@ -18678,7 +18690,7 @@ In @strong{MySQL} Version 3.23.23 or later, you can also create special
@code{MyISAM} table type supports @code{FULLTEXT} indexes. They can be created
only from @code{VARCHAR} and @code{TEXT} columns.
Indexing always happens over the entire column, partial indexing is not
supported. See @ref{MySQL full-text search} for details of operation.
supported. See @ref{Fulltext Search} for details of operation.
@item
The @code{FOREIGN KEY}, @code{CHECK}, and @code{REFERENCES} clauses don't
......@@ -18711,7 +18723,7 @@ The different table types are:
@item GEMINI @tab Transaction-safe tables with row-level locking @xref{GEMINI}.
@item HEAP @tab The data for this table is only stored in memory. @xref{HEAP}.
@item ISAM @tab The original table handler. @xref{ISAM}.
@item INNOBASE @tab Transaction-safe tables with row locking. @xref{INNOBASE}.
@item INNODB @tab Transaction-safe tables with row locking. @xref{INNODB}.
@item MERGE @tab A collection of MyISAM tables used as one table. @xref{MERGE}.
@item MyISAM @tab The new binary portable table handler that is replacing ISAM. @xref{MyISAM}.
@end multitable
......@@ -21124,7 +21136,7 @@ The following columns are returned:
@item @code{Comment} @tab The comment used when creating the table (or some information why @strong{MySQL} couldn't access the table information).
@end multitable
@code{INNOBASE} tables will report the free space in the tablespace
@code{INNODB} tables will report the free space in the tablespace
in the table comment.
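A brief sketch of reading that comment (the database and table names are
illustrative); the free-space figure shows up in the @code{Comment} column
of the output:
@example
mysql> SHOW TABLE STATUS FROM test LIKE 'customer';   # example database/table names
@end example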
@node SHOW STATUS, SHOW VARIABLES, SHOW TABLE STATUS, SHOW
......@@ -21330,7 +21342,7 @@ differ somewhat:
| flush_time | 0 |
| have_bdb | YES |
| have_gemini | NO |
| have_innobase | YES |
| have_innodb | YES |
| have_raid | YES |
| have_ssl | NO |
| init_file | |
......@@ -21517,9 +21529,9 @@ if @code{--skip-bdb} is used.
@item @code{have_gemini}
@code{YES} if @code{mysqld} supports Gemini tables. @code{DISABLED}
if @code{--skip-gemini} is used.
@item @code{have_innobase}
@code{YES} if @code{mysqld} supports Innobase tables. @code{DISABLED}
if @code{--skip-innobase} is used.
@item @code{have_innodb}
@code{YES} if @code{mysqld} supports Innodb tables. @code{DISABLED}
if @code{--skip-innodb} is used.
@item @code{have_raid}
@code{YES} if @code{mysqld} supports the @code{RAID} option.
@item @code{have_ssl}
......@@ -22278,7 +22290,7 @@ as soon as you execute an update, @strong{MySQL} will store the update on
disk.
If you are using transaction-safe tables (like @code{BDB},
@code{INNOBASE} or @code{GEMINI}), you can put @strong{MySQL} into
@code{INNODB} or @code{GEMINI}), you can put @strong{MySQL} into
non-@code{autocommit} mode with the following command:
@example
......@@ -22883,7 +22895,7 @@ For more information about how @strong{MySQL} uses indexes, see
@code{FULLTEXT} indexes can index only @code{VARCHAR} and
@code{TEXT} columns, and only in @code{MyISAM} tables. @code{FULLTEXT} indexes
are available in @strong{MySQL} Version 3.23.23 and later.
@ref{MySQL full-text search}.
@ref{Fulltext Search}.
@findex DROP INDEX
@node DROP INDEX, Comments, CREATE INDEX, Reference
......@@ -23105,7 +23117,7 @@ used them.
@cindex @code{GEMINI} table type
@cindex @code{HEAP} table type
@cindex @code{ISAM} table type
@cindex @code{INNOBASE} table type
@cindex @code{INNODB} table type
@cindex @code{MERGE} table type
@cindex MySQL table types
@cindex @code{MyISAM} table type
......@@ -23116,7 +23128,7 @@ used them.
As of @strong{MySQL} Version 3.23.6, you can choose between three basic
table formats (@code{ISAM}, @code{HEAP}, and @code{MyISAM}). Newer
@strong{MySQL} versions may support additional table types (@code{BDB},
@code{GEMINI} or @code{INNOBASE}), depending on how you compile it.
@code{GEMINI} or @code{INNODB}), depending on how you compile it.
When you create a new table, you can tell @strong{MySQL} which table
type it should use for the table. @strong{MySQL} will always create a
......@@ -23131,7 +23143,7 @@ You can convert tables between different types with the @code{ALTER
TABLE} statement. @xref{ALTER TABLE, , @code{ALTER TABLE}}.
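For example, a table could be converted to the @code{MyISAM} format like
this (the table name is only an example):
@example
mysql> ALTER TABLE some_table TYPE = MYISAM;   # 'some_table' is a hypothetical name
@end example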
Note that @strong{MySQL} supports two different kinds of
tables: transaction-safe tables (@code{BDB}, @code{INNOBASE} or
tables: transaction-safe tables (@code{BDB}, @code{INNODB} or
@code{GEMINI}) and non-transaction-safe tables (@code{HEAP}, @code{ISAM},
@code{MERGE}, and @code{MyISAM}).
......@@ -23174,7 +23186,7 @@ of both worlds.
* HEAP:: HEAP tables
* BDB:: BDB or Berkeley_db tables
* GEMINI:: GEMINI tables
* INNOBASE:: INNOBASE tables
* INNODB:: INNODB tables
@end menu
@node MyISAM, MERGE, Table types, Table types
......@@ -24084,7 +24096,7 @@ not trivial).
@end itemize
@cindex tables, @code{GEMINI}
@node GEMINI, INNOBASE, BDB, Table types
@node GEMINI, INNODB, BDB, Table types
@section GEMINI Tables
@menu
......@@ -24165,168 +24177,168 @@ limited by @code{gemini_connection_limit}. The default is 100 users.
NuSphere is working on removing these limitations.
@node INNOBASE, , GEMINI, Table types
@section INNOBASE Tables
@node INNODB, , GEMINI, Table types
@section INNODB Tables
@menu
* INNOBASE overview::
* INNOBASE start:: INNOBASE startup options
* Using INNOBASE tables:: Using INNOBASE tables
* INNOBASE restrictions:: Some restrictions on @code{INNOBASE} tables:
* INNODB overview::
* INNODB start:: INNODB startup options
* Using INNODB tables:: Using INNODB tables
* INNODB restrictions:: Some restrictions on @code{INNODB} tables:
@end menu
@node INNOBASE overview, INNOBASE start, INNOBASE, INNOBASE
@subsection INNOBASE Tables overview
@node INNODB overview, INNODB start, INNODB, INNODB
@subsection INNODB Tables overview
Innobase tables are included in the @strong{MySQL} source distribution
Innodb tables are included in the @strong{MySQL} source distribution
starting from 3.23.34 and will be activated in the @strong{MySQL}-max
binary.
If you have downloaded a binary version of @strong{MySQL} that includes
support for Innobase, simply follow the instructions for
support for Innodb, simply follow the instructions for
installing a binary version of @strong{MySQL}. @xref{Installing binary}.
To compile @strong{MySQL} with Innobase support, download @strong{MySQL}
To compile @strong{MySQL} with Innodb support, download @strong{MySQL}
3.23.34 or newer and configure @code{MySQL} with the
@code{--with-innobase} option. @xref{Installing source}.
@code{--with-innodb} option. @xref{Installing source}.
@example
cd /path/to/source/of/mysql-3.23.34
./configure --with-innobase
./configure --with-innodb
@end example
Innobase provides @strong{MySQL} with a transaction-safe table handler with
commit, rollback, and crash recovery capabilities. Innobase does
Innodb provides @strong{MySQL} with a transaction-safe table handler with
commit, rollback, and crash recovery capabilities. Innodb does
locking on row level, and also provides an Oracle-style consistent
non-locking read in @code{SELECTS}, which increases transaction
concurrency. There is no need for lock escalation in Innobase,
because row-level locks in Innobase fit in a very small space.
concurrency. There is no need for lock escalation in Innodb,
because row-level locks in Innodb fit in a very small space.
Innobase is a table handler that is under the GNU GPL License Version 2
(of June 1991). In the source distribution of @strong{MySQL}, Innobase
Innodb is a table handler that is under the GNU GPL License Version 2
(of June 1991). In the source distribution of @strong{MySQL}, Innodb
appears as a subdirectory.
@node INNOBASE start, Using INNOBASE tables, INNOBASE overview, INNOBASE
@subsection INNOBASE startup options
@node INNODB start, Using INNODB tables, INNODB overview, INNODB
@subsection INNODB startup options
To use Innobase tables you must specify configuration parameters
To use Innodb tables you must specify configuration parameters
in the @code{[mysqld]} section of the @strong{MySQL} configuration
file. Below is an example of possible configuration
parameters in my.cnf for Innobase:
parameters in my.cnf for Innodb:
@example
innobase_data_home_dir = /usr/local/mysql/var
innobase_log_group_home_dir = /usr/local/mysql/var
innobase_log_arch_dir = /usr/local/mysql/var
innobase_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
set-variable = innobase_mirrored_log_groups=1
set-variable = innobase_log_files_in_group=3
set-variable = innobase_log_file_size=5M
set-variable = innobase_log_buffer_size=8M
innobase_flush_log_at_trx_commit=1
innobase_log_archive=0
set-variable = innobase_buffer_pool_size=16M
set-variable = innobase_additional_mem_pool_size=2M
set-variable = innobase_file_io_threads=4
set-variable = innobase_lock_wait_timeout=50
innodb_data_home_dir = /usr/local/mysql/var
innodb_log_group_home_dir = /usr/local/mysql/var
innodb_log_arch_dir = /usr/local/mysql/var
innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
set-variable = innodb_mirrored_log_groups=1
set-variable = innodb_log_files_in_group=3
set-variable = innodb_log_file_size=5M
set-variable = innodb_log_buffer_size=8M
innodb_flush_log_at_trx_commit=1
innodb_log_archive=0
set-variable = innodb_buffer_pool_size=16M
set-variable = innodb_additional_mem_pool_size=2M
set-variable = innodb_file_io_threads=4
set-variable = innodb_lock_wait_timeout=50
@end example
The meanings of the configuration parameters are the following:
@multitable @columnfractions .30 .70
@item @code{innobase_data_home_dir} @tab
The common part of the directory path for all innobase data files.
@item @code{innobase_data_file_path} @tab
@item @code{innodb_data_home_dir} @tab
The common part of the directory path for all innodb data files.
@item @code{innodb_data_file_path} @tab
Paths to individual data files and their sizes. The full directory path
to each data file is acquired by concatenating innobase_data_home_dir to
to each data file is acquired by concatenating innodb_data_home_dir to
the paths specified here. The file sizes are specified in megabytes,
hence the 'M' after the size specification above. Do not set a file size
bigger than 4000M, and on most operating systems not bigger than 2000M.
@item @code{innobase_mirrored_log_groups} @tab Number of identical copies of log groups we
@item @code{innodb_mirrored_log_groups} @tab Number of identical copies of log groups we
keep for the database. Currently this should be set to 1.
@item @code{innobase_log_group_home_dir} @tab
Directory path to Innobase log files.
@item @code{innobase_log_files_in_group} @tab
Number of log files in the log group. Innobase writes to the files in a
@item @code{innodb_log_group_home_dir} @tab
Directory path to Innodb log files.
@item @code{innodb_log_files_in_group} @tab
Number of log files in the log group. Innodb writes to the files in a
circular fashion. Value 3 is recommended here.
@item @code{innobase_log_file_size} @tab
@item @code{innodb_log_file_size} @tab
Size of each log file in a log group in megabytes. Sensible values range
from 1M to the size of the buffer pool specified below. The bigger the
value, the less checkpoint flush activity is needed in the buffer pool,
saving disk i/o. But bigger log files also mean that recovery will be
slower in case of a crash. File size restriction as for a data file.
@item @code{innobase_log_buffer_size} @tab
The size of the buffer which Innobase uses to write log to the log files
@item @code{innodb_log_buffer_size} @tab
The size of the buffer which Innodb uses to write log to the log files
on disk. Sensible values range from 1M to half the combined size of log
files. A big log buffer allows large transactions to run without a need
to write the log to disk until the transaction commit. Thus, if you have
big transactions, making the log buffer big will save disk i/o.
@item @code{innobase_flush_log_at_trx_commit} @tab
@item @code{innodb_flush_log_at_trx_commit} @tab
Normally this is set to 1, meaning that at a transaction commit the log
is flushed to disk, and the modifications made by the transaction become
permanent, and survive a database crash. If you are willing to
compromise this safety, and you are running small transactions, you may
set this to 0 to reduce disk i/o to the logs.
@item @code{innobase_log_arch_dir} @tab
@item @code{innodb_log_arch_dir} @tab
The directory where fully written log files would be archived if we used
log archiving. The value of this parameter should currently be set the
same as @code{innobase_log_group_home_dir}.
@item @code{innobase_log_archive} @tab
same as @code{innodb_log_group_home_dir}.
@item @code{innodb_log_archive} @tab
This value should currently be set to 0. As recovery from a backup is
done by @strong{MySQL} using its own log files, there is currently no need
to archive Innobase log files.
@item @code{innobase_buffer_pool_size} @tab
The size of the memory buffer Innobase uses to cache data and indexes of
to archive Innodb log files.
@item @code{innodb_buffer_pool_size} @tab
The size of the memory buffer Innodb uses to cache data and indexes of
its tables. The bigger you set this the less disk i/o is needed to
access data in tables. On a dedicated database server you may set this
parameter up to 90 % of the machine physical memory size. Do not set it
too large, though, because competition of the physical memory may cause
paging in the operating system.
@item @code{innobase_additional_mem_pool_size} @tab
Size of a memory pool Innobase uses to store data dictionary information
@item @code{innodb_additional_mem_pool_size} @tab
Size of a memory pool Innodb uses to store data dictionary information
and other internal data structures. A sensible value for this might be
2M, but the more tables you have in your application the more you will
need to allocate here. If Innobase runs out of memory in this pool, it
need to allocate here. If Innodb runs out of memory in this pool, it
will start to allocate memory from the operating system, and write
warning messages to the @strong{MySQL} error log.
@item @code{innobase_file_io_threads} @tab
Number of file i/o threads in Innobase. Normally, this should be 4, but
@item @code{innodb_file_io_threads} @tab
Number of file i/o threads in Innodb. Normally, this should be 4, but
on Windows NT disk i/o may benefit from a larger number.
@item @code{innobase_lock_wait_timeout} @tab
Timeout in seconds an Innobase transaction may wait for a lock before
being rolled back. Innobase automatically detects transaction deadlocks
@item @code{innodb_lock_wait_timeout} @tab
Timeout in seconds an Innodb transaction may wait for a lock before
being rolled back. Innodb automatically detects transaction deadlocks
in its own lock table and rolls back the transaction. If you use the
@code{LOCK TABLES} command, or transaction-safe table handlers other
than Innobase in the same transaction, then a deadlock may arise which
Innobase cannot notice. In cases like this the timeout is useful to
than Innodb in the same transaction, then a deadlock may arise which
Innodb cannot notice. In cases like this the timeout is useful to
resolve the situation.
@end multitable
@node Using INNOBASE tables, INNOBASE restrictions, INNOBASE start, INNOBASE
@subsection Using INNOBASE tables
@node Using INNODB tables, INNODB restrictions, INNODB start, INNODB
@subsection Using INNODB tables
Technically, Innobase is a database backend placed under @strong{MySQL}.
Innobase has its own buffer pool for caching data and indexes in main
memory. Innobase stores its tables and indexes in a tablespace, which
Technically, Innodb is a database backend placed under @strong{MySQL}.
Innodb has its own buffer pool for caching data and indexes in main
memory. Innodb stores its tables and indexes in a tablespace, which
may consist of several files. This is different from, for example,
@code{MyISAM} tables where each table is stored as a separate file.
To create a table in the Innobase format you must specify
@code{TYPE = INNOBASE} in the table creation SQL command:
To create a table in the Innodb format you must specify
@code{TYPE = INNODB} in the table creation SQL command:
@example
CREATE TABLE CUSTOMERS (A INT, B CHAR (20), INDEX (A)) TYPE = INNOBASE;
CREATE TABLE CUSTOMERS (A INT, B CHAR (20), INDEX (A)) TYPE = INNODB;
@end example
A consistent non-locking read is the default locking behavior when you
do a @code{SELECT} from an Innobase table. For a searched update and an
do a @code{SELECT} from an Innodb table. For a searched update and an
insert, row-level exclusive locking is performed.
You can query the amount of free space in the Innobase tablespace (=
You can query the amount of free space in the Innodb tablespace (=
data files you specified in my.cnf) by issuing the table status command
of @strong{MySQL} for any table you have created with @code{TYPE =
INNOBASE}. Then the amount of free space in the tablespace appears in
INNODB}. Then the amount of free space in the tablespace appears in
the table comment section in the output of SHOW. An example:
@example
......@@ -24334,45 +24346,45 @@ SHOW TABLE STATUS FROM TEST LIKE 'CUSTOMER'
@end example
if you have created a table named CUSTOMER in a database named
TEST. Note that the statistics SHOW gives about Innobase tables
TEST. Note that the statistics SHOW gives about Innodb tables
are only approximate: they are used in SQL optimization. Table and
index reserved sizes in bytes are accurate, though.
NOTE: DROP DATABASE does not currently work for Innobase tables!
NOTE: DROP DATABASE does not currently work for Innodb tables!
You must drop the tables individually.
Note that in addition to your tables, the rollback segment uses space
from the tablespace.
Since Innobase is a multiversioned database, it must keep information
Since Innodb is a multiversioned database, it must keep information
of old versions of rows in the tablespace. This information is stored
in a data structure called a rollback segment, like in Oracle. In contrast
to Oracle, you do not need to configure the rollback segment in any way in
Innobase. If you issue SELECTs, which by default do a consistent read in
Innobase, remember to commit your transaction regularly. Otherwise
Innodb. If you issue SELECTs, which by default do a consistent read in
Innodb, remember to commit your transaction regularly. Otherwise
the rollback segment will grow because it has to preserve the information
needed for further consistent reads in your transaction: in Innobase
needed for further consistent reads in your transaction: in Innodb
all consistent reads within one transaction will see the same timepoint
snapshot of the database: the reads are also 'consistent' with
respect to each other.
Some Innobase errors: If you run out of file space in the tablespace,
Some Innodb errors: If you run out of file space in the tablespace,
you will get the @strong{MySQL} 'Table is full' error. If you want to
make your tablespace bigger, you have to shut down @strong{MySQL} and
add a new datafile specification to @file{my.cnf}, to the
@code{innobase_data_file_path} parameter.
@code{innodb_data_file_path} parameter.
A transaction deadlock or a timeout in a lock wait will give 'Table handler
error 1000000'.
Contact information of Innobase Oy, producer of the Innobase engine:
Contact information of Innobase Oy, producer of the Innodb engine:
Website: @uref{http://www.innobase.fi}.
@email{Heikki.Tuuri@@innobase.inet.fi}
@example
phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile)
Innobase Oy Inc.
Innodb Oy Inc.
World Trade Center Helsinki
Aleksanterinkatu 17
P.O.Box 800
......@@ -24380,8 +24392,8 @@ P.O.Box 800
Finland
@end example
@node INNOBASE restrictions, , Using INNOBASE tables, INNOBASE
@subsection Some restrictions on @code{INNOBASE} tables:
@node INNODB restrictions, , Using INNODB tables, INNODB
@subsection Some restrictions on @code{INNODB} tables:
@itemize @bullet
@item
......@@ -24392,9 +24404,9 @@ rows, one by one, which isn't that fast.
@item
The maximum blob size is 8000 bytes.
@item
Before dropping a database with @code{INNOBASE} tables one has to drop
Before dropping a database with @code{INNODB} tables one has to drop
the individual tables first. If one doesn't do that, the space in the
Innobase table space will not be reclaimed.
Innodb table space will not be reclaimed.
@end itemize
@cindex tutorial
......@@ -25336,7 +25348,7 @@ this may be different than what you are used to.)
@item Claws @tab Gwen @tab cat @tab m @tab 1994-03-17 @tab
@item Buffy @tab Harold @tab dog @tab f @tab 1989-05-13 @tab
@item Fang @tab Benny @tab dog @tab m @tab 1990-08-27 @tab
@item Bowser @tab Diane @tab dog @tab m @tab 1998-08-31 @tab 1995-07-29
@item Bowser @tab Diane @tab dog @tab m @tab 1989-08-31 @tab 1995-07-29
@item Chirpy @tab Gwen @tab bird @tab f @tab 1998-09-11 @tab
@item Whistler @tab Gwen @tab bird @tab @tab 1997-12-09 @tab
@item Slim @tab Benny @tab snake @tab m @tab 1996-04-29 @tab
......@@ -27129,7 +27141,7 @@ tables}.
@cindex increasing, speed
@cindex speed, increasing
@cindex databases, replicating
@node Replication, Performance, Server, Top
@node Replication, Fulltext Search, Server, Top
@chapter Replication in MySQL
@menu
......@@ -28108,10 +28120,208 @@ Once you have collected the evidence on the phantom problem, try hard to
isolate it into a separate test case first. Then report the problem to
@email{bugs@@lists.mysql.com} with as much info as possible.
@cindex searching, full-text
@cindex full-text search
@cindex FULLTEXT
@node Fulltext Search, Performance, Replication, Top
@chapter MySQL Full-text Search
Since Version 3.23.23, @strong{MySQL} has support for full-text indexing
and searching. Full-text indexes in @strong{MySQL} are indexes of type
@code{FULLTEXT}. @code{FULLTEXT} indexes can be created from @code{VARCHAR}
and @code{TEXT} columns at @code{CREATE TABLE} time or added later with
@code{ALTER TABLE} or @code{CREATE INDEX}. For large datasets, adding a
@code{FULLTEXT} index with @code{ALTER TABLE} (or @code{CREATE INDEX}) will
be much faster than inserting rows into an empty table that already has a
@code{FULLTEXT} index.
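For example, a @code{FULLTEXT} index can be added to an existing
@code{MyISAM} table in either of the following ways (the table and column
names here are only illustrative):
@example
mysql> ALTER TABLE articles ADD FULLTEXT (title, body);        # 'articles' is an example table
mysql> CREATE FULLTEXT INDEX ft_idx ON articles (title, body); # equivalent alternative
@end example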
Full-text search is performed with the @code{MATCH} function.
@example
mysql> CREATE TABLE t (a VARCHAR(200), b TEXT, FULLTEXT (a,b));
Query OK, 0 rows affected (0.00 sec)
mysql> INSERT INTO t VALUES
-> ('MySQL has now support', 'for full-text search'),
-> ('Full-text indexes', 'are called collections'),
-> ('Only MyISAM tables','support collections'),
-> ('Function MATCH ... AGAINST()','is used to do a search'),
-> ('Full-text search in MySQL', 'implements vector space model');
Query OK, 5 rows affected (0.00 sec)
Records: 5 Duplicates: 0 Warnings: 0
mysql> SELECT * FROM t WHERE MATCH (a,b) AGAINST ('MySQL');
+---------------------------+-------------------------------+
| a | b |
+---------------------------+-------------------------------+
| MySQL has now support | for full-text search |
| Full-text search in MySQL | implements vector space model |
+---------------------------+-------------------------------+
2 rows in set (0.00 sec)
mysql> SELECT *,MATCH a,b AGAINST ('collections support') as x FROM t;
+------------------------------+-------------------------------+--------+
| a | b | x |
+------------------------------+-------------------------------+--------+
| MySQL has now support | for full-text search | 0.3834 |
| Full-text indexes | are called collections | 0.3834 |
| Only MyISAM tables | support collections | 0.7668 |
| Function MATCH ... AGAINST() | is used to do a search | 0 |
| Full-text search in MySQL | implements vector space model | 0 |
+------------------------------+-------------------------------+--------+
5 rows in set (0.00 sec)
@end example
The function @code{MATCH} matches a natural language query @code{AGAINST}
a text collection (which is simply the columns that are covered by a
@code{FULLTEXT} index). For every row in a table it returns a relevance value:
a similarity measure between the text in that row (in the columns that are
part of the collection) and the query. When it is used in a @code{WHERE}
clause (see the example above), the rows returned are automatically sorted
in order of decreasing relevance. Relevance is a non-negative floating-point number.
Zero relevance means no similarity. Relevance is computed based on the
number of words in the row, the number of unique words in that row, the
total number of words in the collection, and the number of documents (rows)
that contain a particular word.
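Building on the example above, the relevance values can be retrieved and
used for filtering at the same time; a small sketch reusing table @code{t}:
@example
mysql> SELECT a, b, MATCH (a,b) AGAINST ('collections support') AS score
    -> FROM t WHERE MATCH (a,b) AGAINST ('collections support');
@end example
Because the @code{MATCH} call appears in the @code{WHERE} clause, the rows
come back sorted by decreasing relevance, as described above.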
MySQL uses a very simple parser to split text into words. A ``word'' is
any sequence of letters, numbers, @samp{'}, and @samp{_}. Any ``word''
that is present in the stopword list or just too short (3 characters
or less) is ignored.
Every correct word in the collection and in the query is weighted,
according to its significance in the query or collection. This way, a
word that is present in many documents will have lower weight (and may
even have a zero weight), because it has lower semantic value in this
particular collection. Otherwise, if the word is rare, it will receive a
higher weight. The weights of the words are then combined to compute the
relevance of the row.
Such a technique works best with large collections (in fact, it was
carefully tuned this way). For very small tables, word distribution
does not adequately reflect their semantic value, and this model
may sometimes produce bizarre results.
For example, a search for the word "search" will produce no results in the
above example. The word "search" is present in more than half of the rows and,
as such, is effectively treated as a stopword (that is, with a semantic value
of zero). This is really the desired behavior: a natural language query
should not return every other row in a 1GB table.
A word that matches half of the rows in a table is less likely to locate
relevant documents. In fact, it will most likely find plenty of irrelevant
documents. We all know this happens far too often when we are trying to find
something on the Internet with a search engine. It is with this reasoning
that such rows have been assigned a low semantic value in @strong{a
particular dataset}.
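To see this behavior on the five-row sample table above, one can run the
following query; according to the reasoning above it should return an empty
result (the exact client output line shown is an assumption):
@example
mysql> SELECT * FROM t WHERE MATCH (a,b) AGAINST ('search');
Empty set (0.00 sec)
@end example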
@menu
* Fulltext Fine-tuning::
* Fulltext Features to Appear in MySQL 4.0::
* Fulltext TODO::
@end menu
@node Fulltext Fine-tuning, Fulltext Features to Appear in MySQL 4.0, , Fulltext Search
@section Fine-tuning MySQL Full-text Search
Unfortunately, full-text search has no user-tunable parameters yet,
although adding some is very high on the TODO list. However, if you have a
@strong{MySQL} source distribution (@xref{Installing source}.), you can
somewhat alter the full-text search behavior.
Note that full-text search was carefully tuned for the best searching
effectiveness. Modifying the default behavior will, in most cases,
only make the search results worse. Do not alter the @strong{MySQL} sources
unless you know what you are doing!
@itemize
@item
The minimal length of a word to be indexed is defined in the
@code{myisam/ftdefs.h} file by the line
@example
#define MIN_WORD_LEN 4
@end example
Change it to the value you prefer, recompile @strong{MySQL}, and rebuild
your @code{FULLTEXT} indexes (see the rebuild sketch after this list).
@item
The stopword list is defined in @code{myisam/ft_static.c}.
Modify it to your taste, recompile @strong{MySQL}, and rebuild
your @code{FULLTEXT} indexes.
@item
The 50% threshold is caused by the particular weighting scheme chosen. To
disable it, change the following line in @code{myisam/ftdefs.h}:
@example
#define GWS_IN_USE GWS_PROB
@end example
to
@example
#define GWS_IN_USE GWS_FREQ
@end example
and recompile @strong{MySQL}.
There is no need to rebuild the indexes in this case.
@end itemize
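As referenced above, a sketch of rebuilding a @code{FULLTEXT} index after
recompiling; it assumes the index on table @code{t} from the earlier example
is named @code{a} (the default name taken from the first indexed column;
@code{SHOW INDEX FROM t} reports the actual name):
@example
mysql> ALTER TABLE t DROP INDEX a, ADD FULLTEXT (a,b);  # index name 'a' is assumed
@end example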
@node Fulltext Features to Appear in MySQL 4.0, Fulltext TODO, Fulltext Fine-tuning, Fulltext Search
@section New Features of Full-text Search to Appear in MySQL 4.0
This section includes a list of the fulltext features that are already
implemented in the 4.0 tree. It explains the
@strong{More functions for full-text search} entry of @ref{TODO MySQL 4.0}.
@itemize @bullet
@item @code{REPAIR TABLE} with @code{FULLTEXT} indexes,
@code{ALTER TABLE} with @code{FULLTEXT} indexes, and
@code{OPTIMIZE TABLE} with @code{FULLTEXT} indexes are now
up to 100 times faster.
@item @code{MATCH ... AGAINST} now supports the following
@strong{boolean operators}:
@itemize @bullet
@item @code{+}word means that the word @strong{must} be present in every
row returned.
@item @code{-}word means that the word @strong{must not} be present in any
row returned.
@item @code{<} and @code{>} can be used to decrease and increase word
weight in the query.
@item @code{~} can be used to assign a @strong{negative} weight to a noise
word.
@item @code{*} is a truncation operator.
@end itemize
Boolean search uses a simpler way of calculating relevance, one that does
not have the 50% threshold (see the usage sketch after this list).
@item Searches are now up to 2 times faster due to an optimized search algorithm.
@item Utility program @code{ft_dump} added for low-level @code{FULLTEXT}
index operations (querying/dumping/statistics).
@end itemize
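A usage sketch of the operators listed above, assuming the
@code{IN BOOLEAN MODE} modifier of the 4.0 syntax (that modifier is an
assumption here; it is not documented elsewhere in this manual):
@example
mysql> SELECT * FROM t
    -> WHERE MATCH (a,b) AGAINST ('+MySQL -collections' IN BOOLEAN MODE);  -- 4.0 syntax assumed
@end example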
@node Fulltext TODO, , Fulltext Features to Appear in MySQL 4.0, Fulltext Search
@section Full-text Search TODO
@itemize @bullet
@item Make all operations with @code{FULLTEXT} index @strong{faster}.
@item Support for braces @code{()} in boolean full-text search.
@item Support for "always-index words". They could be any strings
the user wants to treat as words, examples are "C++", "AS/400", "TCP/IP", etc.
@item Support for full-text search in @code{MERGE} tables.
@item Support for multi-byte charsets.
@item Make the stopword list depend on the language of the data.
@item Stemming (dependent on the language of the data, of course).
@item Generic user-suppliable UDF (?) preparser.
@item Make the model more flexible (by adding some adjustable
parameters to @code{FULLTEXT} in @code{CREATE/ALTER TABLE}).
@end itemize
@cindex performance, maximizing
@cindex optimization
@node Performance, MySQL Benchmarks, Replication, Top
@node Performance, MySQL Benchmarks, Fulltext Search, Top
@chapter Getting Maximum Performance from MySQL
Optimization is a complicated task because it ultimately requires
......@@ -28293,6 +28503,10 @@ If you connect using TCP/IP rather than Unix sockets, the result is 7.5%
slower on the same computer. (If you are connecting to @code{localhost},
@strong{MySQL} will, by default, use sockets).
@item
If you connect using TCP/IP from another computer over a 100M Ethernet,
things will be 8-11 % slower.
@item
If you compile with @code{--with-debug=full}, then you will lose 20%
for most queries, but some queries may take substantially longer (The
......@@ -28318,8 +28532,9 @@ get bigger.
Running with @code{--log-bin} makes @strong{MySQL} 1% slower.
@item
Compiling without frame pointers @code{-fomit-frame-pointer} with gcc makes
@strong{MySQL} 1% faster.
Compiling on Linux-x86 using gcc without frame pointers
(@code{-fomit-frame-pointer} or @code{-fomit-frame-pointer -ffixed-ebp}) makes
@code{mysqld} 1-4% faster.
@end itemize
The @strong{MySQL}-Linux distribution provided by @strong{MySQL AB} used
......@@ -31293,8 +31508,8 @@ mysqldump --all-databases > all_databases.sql
@code{mysqlhotcopy} is a perl script that uses @code{LOCK TABLES},
@code{FLUSH TABLES} and @code{cp} or @code{scp} to quickly make a backup
of a database. It's the fastest way to make a backup of the database,
but it can only be run on the same machine where the database directories
are.
or of single tables, but it can only be run on the same machine where the
database directories are.
@example
mysqlhotcopy db_name [/path/to/new_directory]
......@@ -40549,11 +40764,10 @@ This is a relatively low traffic list, in comparison with
@menu
* MySQL threads:: MySQL threads
* MySQL full-text search:: MySQL full-text search
* MySQL test suite:: MySQL test suite
@end menu
@node MySQL threads, MySQL full-text search, MySQL internals, MySQL internals
@node MySQL threads, MySQL test suite, , MySQL internals
@section MySQL Threads
The @strong{MySQL} server creates the following threads:
......@@ -40801,7 +41015,7 @@ parameters to @code{FULLTEXT} in @code{CREATE/ALTER TABLE}).
@cindex mysqltest, MySQL Test Suite
@cindex testing mysqld, mysqltest
@node MySQL test suite, , MySQL full-text search, MySQL internals
@node MySQL test suite, , MySQL threads, MySQL internals
@section MySQL Test Suite
Until recently, our main full-coverage test suite was based on proprietary
......@@ -42769,7 +42983,7 @@ A new ISAM library which is tuned for SQL and supports large files.
@item @strong{BerkeleyDB} or @strong{BDB}
Uses the Berkeley DB library from Sleepycat Software to implement
transaction-safe tables.
@item @strong{Innobase}
@item @strong{Innodb}
A transaction-safe table handler that supports row level locking, and many
Oracle-like features.
@c change "three" to "four" above when uncommenting this
......@@ -42845,6 +43059,11 @@ not yet 100% confident in this code.
@appendixsubsec Changes in release 3.23.37
@itemize @bullet
@item
Changed @code{INNOBASE} to @code{INNODB} (because the @code{INNOBASE}
name was already used). Note that all @code{configure} options and
@code{mysqld} start options are now using @code{innodb} instead of
@code{innobase}.
@item
Fixed bug when using indexes on @code{CHAR(255) NULL} columns.
@item
Slave thread will now be started even if @code{master-host} is not set, as
......@@ -48057,7 +48276,7 @@ the @code{.MYD} file.
Better replication.
@item
More functions for full-text search.
@xref{Fulltext features to appear in MySQL 4.0}.
@xref{Fulltext Features to Appear in MySQL 4.0}.
@item
Character set casts and syntax for handling multiple character sets.
@item
......@@ -48181,6 +48400,19 @@ Check why MIT-pthreads @code{ctime()} doesn't work on some FreeBSD systems.
Add an @code{IMAGE} option to @code{LOAD DATA INFILE} to not update
@code{TIMESTAMP} and @code{AUTO_INCREMENT} fields.
@item
Add @code{LOAD DATA INFILE ... UPDATE} syntax.
@itemize @bullet
@item
For tables with primary keys, if the data contains the primary key,
entries matching that primary key are updated from the remainder of the
columns. However, columns MISSING from the incoming data feed are not
touched.
@item
For tables with primary keys that are missing some part of the key
in the incoming data stream, or that have no primary key, the feed is
treated the way @code{LOAD DATA INFILE ... REPLACE INTO} is handled now.
@end itemize
@item
Make @code{LOAD DATA INFILE} understand syntax like:
@example
LOAD DATA INFILE 'file_name.txt' INTO TABLE tbl_name
......@@ -48695,8 +48927,10 @@ setting the @code{DBI_TRACE} environment variable.
On some operating systems, the error log will contain a stack trace if
@code{mysqld} dies unexpectedly. You can use this to find out where (and
maybe why) @code{mysqld} died. @xref{Error log}. To get a stack trace,
you should NOT compile @code{mysqld} with the
@code{-fomit-frame-pointer} option to gcc.
you should NOT compile @code{mysqld} with the @code{-fomit-frame-pointer}
option to gcc. On Linux-x86 you can use
@code{-fomit-frame-pointer -ffixed-ebp} to get both speed and a reasonably
accurate stack trace.
@xref{Compiling for debugging}.
If the error file contains something like the following:
......@@ -912,31 +912,31 @@ dnl END OF MYSQL_CHECK_BDB SECTION
dnl ---------------------------------------------------------------------------
dnl ---------------------------------------------------------------------------
dnl Macro: MYSQL_CHECK_INNOBASE
dnl Sets HAVE_INNOBASE_DB if --with-innobase is used
dnl Macro: MYSQL_CHECK_INNODB
dnl Sets HAVE_INNOBASE_DB if --with-innodb is used
dnl ---------------------------------------------------------------------------
AC_DEFUN([MYSQL_CHECK_INNOBASE], [
AC_ARG_WITH([innobase],
AC_DEFUN([MYSQL_CHECK_INNODB], [
AC_ARG_WITH([innodb],
[\
--with-innobase Use Innobase],
[innobase="$withval"],
[innobase=no])
--with-innodb Use Innodb],
[innodb="$withval"],
[innodb=no])
AC_MSG_CHECKING([for Innobase])
AC_MSG_CHECKING([for Innodb])
have_innobase_db=no
innobase_includes=
innobase_libs=
case "$innobase" in
have_innodb=no
innodb_includes=
innodb_libs=
case "$innodb" in
yes )
AC_MSG_RESULT([Using Innobase])
AC_MSG_RESULT([Using Innodb])
AC_DEFINE(HAVE_INNOBASE_DB)
have_innobase_db="yes"
innobase_includes="-I../innobase/include"
have_innodb="yes"
innodb_includes="-I../innobase/include"
dnl Some libs are listed several times, in order for gcc to sort out
dnl circular references.
innobase_libs="\
innodb_libs="\
../innobase/usr/libusr.a\
../innobase/odbc/libodbc.a\
../innobase/srv/libsrv.a\
......@@ -973,19 +973,19 @@ dnl circular references.
../innobase/os/libos.a\
../innobase/ut/libut.a"
AC_CHECK_LIB(rt, aio_read, [innobase_libs="$innobase_libs -lrt"])
AC_CHECK_LIB(rt, aio_read, [innodb_libs="$innodb_libs -lrt"])
;;
* )
AC_MSG_RESULT([Not using Innobase])
AC_MSG_RESULT([Not using Innodb])
;;
esac
AC_SUBST(innobase_includes)
AC_SUBST(innobase_libs)
AC_SUBST(innodb_includes)
AC_SUBST(innodb_libs)
])
dnl ---------------------------------------------------------------------------
dnl END OF MYSQL_CHECK_INNOBASE SECTION
dnl END OF MYSQL_CHECK_INNODB SECTION
dnl ---------------------------------------------------------------------------
dnl ---------------------------------------------------------------------------
......
......@@ -36,6 +36,8 @@ subdirs = btree build_vxworks build_win32 clib common cxx db db185 \
all:
cd $(bdb_build) && $(MAKE) all
clean:;
# May want to fix this, and MYSQL/configure, to install things
install dvi check installcheck:
......
......@@ -1923,7 +1923,7 @@ AC_MSG_RESULT([default: $default_charset; compiled in: $CHARSETS])
MYSQL_CHECK_BDB
MYSQL_CHECK_INNOBASE
MYSQL_CHECK_INNODB
MYSQL_CHECK_GEMINI
# If we have threads generate some library functions and test programs
......@@ -2024,14 +2024,14 @@ EOF
fi
fi
if test X"$have_innobase_db" = Xyes
if test X"$have_innodb" = Xyes
then
sql_server_dirs="innobase $sql_server_dirs"
echo "CONFIGURING FOR INNOBASE DB"
echo "CONFIGURING FOR INNODB"
(cd innobase && sh ./configure) \
|| AC_MSG_ERROR([could not configure Innobase DB])
|| AC_MSG_ERROR([could not configure INNODB])
echo "END OF INNOBASE DB CONFIGURATION"
echo "END OF INNODB CONFIGURATION"
fi
if test "$with_posix_threads" = "no" -o "$with_mit_threads" = "yes"
......
/******************************************************
Database log
(c) 1995-1997 Innobase Oy
(c) 1995-1997 InnoDB Oy
Created 12/9/1995 Heikki Tuuri
*******************************************************/
......@@ -1886,12 +1886,12 @@ log_group_archive(
if (!ret) {
fprintf(stderr,
"Innobase: Cannot create or open archive log file %s.\n",
"InnoDB: Cannot create or open archive log file %s.\n",
name);
fprintf(stderr, "Innobase: Cannot continue operation.\n"
"Innobase: Check that the log archive directory exists,\n"
"Innobase: you have access rights to it, and\n"
"Innobase: there is space available.\n");
fprintf(stderr, "InnoDB: Cannot continue operation.\n"
"InnoDB: Check that the log archive directory exists,\n"
"InnoDB: you have access rights to it, and\n"
"InnoDB: there is space available.\n");
exit(1);
}
......@@ -2628,7 +2628,7 @@ logs_empty_and_mark_files_at_shutdown(void)
dulint lsn;
ulint arch_log_no;
fprintf(stderr, "Innobase: Starting shutdown...\n");
fprintf(stderr, "InnoDB: Starting shutdown...\n");
/* Wait until the master thread and all other operations are idle: our
algorithm only works if the server is idle at shutdown */
......@@ -2718,7 +2718,7 @@ logs_empty_and_mark_files_at_shutdown(void)
fil_flush_file_spaces(FIL_TABLESPACE);
fprintf(stderr, "Innobase: Shutdown completed\n");
fprintf(stderr, "InnoDB: Shutdown completed\n");
}
/**********************************************************
......
/******************************************************
Recovery
(c) 1997 Innobase Oy
(c) 1997 InnoDB Oy
Created 9/20/1997 Heikki Tuuri
*******************************************************/
......@@ -412,7 +412,7 @@ recv_find_max_checkpoint(
+ LOG_CHECKPOINT_CHECKSUM_1)) {
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Checkpoint in group %lu at %lu invalid, %lu, %lu\n",
"InnoDB: Checkpoint in group %lu at %lu invalid, %lu, %lu\n",
group->id, field,
fold & 0xFFFFFFFF,
mach_read_from_4(buf
......@@ -431,7 +431,7 @@ recv_find_max_checkpoint(
+ LOG_CHECKPOINT_CHECKSUM_2)) {
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Checkpoint in group %lu at %lu invalid, %lu, %lu\n",
"InnoDB: Checkpoint in group %lu at %lu invalid, %lu, %lu\n",
group->id, field,
fold & 0xFFFFFFFF,
mach_read_from_4(buf
......@@ -451,7 +451,7 @@ recv_find_max_checkpoint(
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Checkpoint number %lu found in group %lu\n",
"InnoDB: Checkpoint number %lu found in group %lu\n",
ut_dulint_get_low(checkpoint_no), group->id);
}
......@@ -470,7 +470,7 @@ recv_find_max_checkpoint(
if (*max_group == NULL) {
fprintf(stderr, "Innobase: No valid checkpoint found\n");
fprintf(stderr, "InnoDB: No valid checkpoint found\n");
return(DB_ERROR);
}
......@@ -866,7 +866,7 @@ recv_recover_page(
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Applying log rec type %lu len %lu to space %lu page no %lu\n",
"InnoDB: Applying log rec type %lu len %lu to space %lu page no %lu\n",
(ulint)recv->type, recv->len, recv_addr->space,
recv_addr->page_no);
}
......@@ -1015,7 +1015,7 @@ recv_apply_hashed_log_recs(
if (recv_addr->state == RECV_NOT_PROCESSED) {
if (!has_printed) {
fprintf(stderr,
"Innobase: Starting an apply batch of log records to the database...\n");
"InnoDB: Starting an apply batch of log records to the database...\n");
has_printed = TRUE;
}
......@@ -1082,7 +1082,7 @@ recv_apply_hashed_log_recs(
recv_sys_empty_hash();
if (has_printed) {
fprintf(stderr, "Innobase: Apply batch completed\n");
fprintf(stderr, "InnoDB: Apply batch completed\n");
}
mutex_exit(&(recv_sys->mutex));
......@@ -1454,7 +1454,7 @@ recv_parse_log_recs(
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Parsed a single log rec type %lu len %lu space %lu page no %lu\n",
"InnoDB: Parsed a single log rec type %lu len %lu space %lu page no %lu\n",
(ulint)type, len, space, page_no);
}
......@@ -1503,7 +1503,7 @@ recv_parse_log_recs(
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Parsed a multi log rec type %lu len %lu space %lu page no %lu\n",
"InnoDB: Parsed a multi log rec type %lu len %lu space %lu page no %lu\n",
(ulint)type, len, space, page_no);
}
......@@ -1824,7 +1824,7 @@ recv_scan_log_recs(
if (more_data) {
fprintf(stderr,
"Innobase: Doing recovery: scanned up to log sequence number %lu %lu\n",
"InnoDB: Doing recovery: scanned up to log sequence number %lu %lu\n",
ut_dulint_get_high(*group_scanned_lsn),
ut_dulint_get_low(*group_scanned_lsn));
......@@ -1877,7 +1877,7 @@ recv_group_scan_log_recs(
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Scanned group %lu up to log sequence number %lu %lu\n",
"InnoDB: Scanned group %lu up to log sequence number %lu %lu\n",
group->id,
ut_dulint_get_high(*group_scanned_lsn),
ut_dulint_get_low(*group_scanned_lsn));
......@@ -1977,11 +1977,11 @@ recv_recovery_from_checkpoint_start(
|| ut_dulint_cmp(checkpoint_lsn, min_flushed_lsn) != 0) {
fprintf(stderr,
"Innobase: Database was not shut down normally.\n"
"Innobase: Starting recovery from log files...\n");
"InnoDB: Database was not shut down normally.\n"
"InnoDB: Starting recovery from log files...\n");
fprintf(stderr,
"Innobase: Starting log scan based on checkpoint at\n"
"Innobase: log sequence number %lu %lu\n",
"InnoDB: Starting log scan based on checkpoint at\n"
"InnoDB: log sequence number %lu %lu\n",
ut_dulint_get_high(checkpoint_lsn),
ut_dulint_get_low(checkpoint_lsn));
}
......@@ -2134,7 +2134,7 @@ recv_recovery_from_checkpoint_finish(void)
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Log records applied to the database\n");
"InnoDB: Log records applied to the database\n");
}
/* Free the resources of the recovery system */
......@@ -2246,12 +2246,12 @@ log_group_recover_from_archive_file(
fil_release_right_to_open();
ask_again:
fprintf(stderr,
"Innobase: Do you want to copy additional archived log files\n"
"Innobase: to the directory\n");
"InnoDB: Do you want to copy additional archived log files\n"
"InnoDB: to the directory\n");
fprintf(stderr,
"Innobase: or were these all the files needed in recovery?\n");
"InnoDB: or were these all the files needed in recovery?\n");
fprintf(stderr,
"Innobase: (Y == copy more files; N == this is all)?");
"InnoDB: (Y == copy more files; N == this is all)?");
input_char = getchar();
......@@ -2271,13 +2271,13 @@ log_group_recover_from_archive_file(
ut_a(file_size_high == 0);
fprintf(stderr, "Innobase: Opened archived log file %s\n", name);
fprintf(stderr, "InnoDB: Opened archived log file %s\n", name);
ret = os_file_close(file_handle);
if (file_size < LOG_FILE_HDR_SIZE) {
fprintf(stderr,
"Innobase: Archive file header incomplete %s\n", name);
"InnoDB: Archive file header incomplete %s\n", name);
return(TRUE);
}
......@@ -2302,14 +2302,14 @@ log_group_recover_from_archive_file(
|| mach_read_from_4(buf + LOG_FILE_NO)
!= group->archived_file_no) {
fprintf(stderr,
"Innobase: Archive file header inconsistent %s\n", name);
"InnoDB: Archive file header inconsistent %s\n", name);
return(TRUE);
}
if (!mach_read_from_4(buf + LOG_FILE_ARCH_COMPLETED)) {
fprintf(stderr,
"Innobase: Archive file not completely written %s\n", name);
"InnoDB: Archive file not completely written %s\n", name);
return(TRUE);
}
......@@ -2321,7 +2321,7 @@ log_group_recover_from_archive_file(
if (ut_dulint_cmp(recv_sys->parse_start_lsn, start_lsn) < 0) {
fprintf(stderr,
"Innobase: Archive log file %s starts from too big a lsn\n",
"InnoDB: Archive log file %s starts from too big a lsn\n",
name);
return(TRUE);
}
......@@ -2332,7 +2332,7 @@ log_group_recover_from_archive_file(
if (ut_dulint_cmp(recv_sys->scanned_lsn, start_lsn) != 0) {
fprintf(stderr,
"Innobase: Archive log file %s starts from a wrong lsn\n",
"InnoDB: Archive log file %s starts from a wrong lsn\n",
name);
return(TRUE);
}
......@@ -2354,7 +2354,7 @@ log_group_recover_from_archive_file(
if (log_debug_writes) {
fprintf(stderr,
"Innobase: Archive read starting at lsn %lu %lu, len %lu from file %s\n",
"InnoDB: Archive read starting at lsn %lu %lu, len %lu from file %s\n",
ut_dulint_get_high(start_lsn),
ut_dulint_get_low(start_lsn),
len, name);
......@@ -2375,7 +2375,7 @@ log_group_recover_from_archive_file(
if (ret) {
fprintf(stderr,
"Innobase: Archive log file %s does not scan right\n",
"InnoDB: Archive log file %s does not scan right\n",
name);
return(TRUE);
}
......@@ -2435,7 +2435,7 @@ recv_recovery_from_archive_start(
if (!group) {
fprintf(stderr,
"Innobase: There is no log group defined with id %lu!\n",
"InnoDB: There is no log group defined with id %lu!\n",
group_id);
return(DB_ERROR);
}
......
......@@ -7,7 +7,7 @@ thread is usually allocated per processor. Win32
documentation does not know any UMS threads, which suggests
that the concept is internal to SQL Server 7. It may mean that
SQL Server 7 does all the scheduling of threads itself, even
in i/o waits. We should maybe modify Innobase to use the same
in i/o waits. We should maybe modify InnoDB to use the same
technique, because thread switches within NT may be too slow.
SQL Server 7 also mentions fibers, which are cooperatively
......@@ -20,7 +20,7 @@ Windows 2000 will have something called thread pooling
Another possibility could be to use some very fast user space
thread library. This might confuse NT though.
(c) 1995 Innobase Oy
(c) 1995 InnoDB Oy
Created 10/8/1995 Heikki Tuuri
*******************************************************/
......@@ -1093,7 +1093,7 @@ srv_read_init_val(
return(DB_ERROR);
}
printf("Error in Innobase booting: keyword %s not found\n",
printf("Error in InnoDB booting: keyword %s not found\n",
keyword);
printf("from the initfile!\n");
......@@ -1114,7 +1114,7 @@ srv_read_init_val(
}
printf(
"Error in Innobase booting: could not read first value after %s\n",
"Error in InnoDB booting: could not read first value after %s\n",
keyword);
printf("from the initfile!\n");
......@@ -1140,7 +1140,7 @@ srv_read_init_val(
}
printf(
"Error in Innobase booting: could not read second value after %s\n",
"Error in InnoDB booting: could not read second value after %s\n",
keyword);
printf("from the initfile!\n");
......@@ -1156,7 +1156,7 @@ srv_read_init_val(
}
printf(
"Error in Innobase booting: numerical value too big after %s\n",
"Error in InnoDB booting: numerical value too big after %s\n",
keyword);
printf("in the initfile!\n");
......@@ -1173,7 +1173,7 @@ srv_read_init_val(
}
printf(
"Error in Innobase booting: numerical value too big after %s\n",
"Error in InnoDB booting: numerical value too big after %s\n",
keyword);
printf("in the initfile!\n");
......@@ -1523,7 +1523,7 @@ srv_general_init(void)
}
/*************************************************************************
Normalizes init parameter values to use units we use inside Innobase. */
Normalizes init parameter values to use units we use inside InnoDB. */
static
ulint
srv_normalize_init_values(void)
......@@ -1552,7 +1552,7 @@ srv_normalize_init_values(void)
}
/*************************************************************************
Boots the Innobase server. */
Boots the InnoDB server. */
ulint
srv_boot(void)
......@@ -1562,7 +1562,7 @@ srv_boot(void)
ulint err;
/* Transform the init parameter values given by MySQL to
use units we use inside Innobase: */
use units we use inside InnoDB: */
err = srv_normalize_init_values();
......@@ -1797,7 +1797,7 @@ srv_lock_timeout_monitor_thread(
}
/***********************************************************************
Tells the Innobase server that there has been activity in the database
Tells the InnoDB server that there has been activity in the database
and wakes up the master thread if it is suspended (not sleeping). Used
in the MySQL interface. Note that there is a small chance that the master
thread stays suspended (we do not protect our operation with the kernel
......
/************************************************************************
Starts the Innobase database server
Starts the InnoDB database server
(c) 1996-2000 Innobase Oy
(c) 1996-2000 InnoDB Oy
Created 2/16/1996 Heikki Tuuri
*************************************************************************/
......@@ -207,7 +207,7 @@ open_or_create_log_file(
if (ret == FALSE) {
if (os_file_get_last_error() != OS_FILE_ALREADY_EXISTS) {
fprintf(stderr,
"Innobase: Error in creating or opening %s\n", name);
"InnoDB: Error in creating or opening %s\n", name);
return(DB_ERROR);
}
......@@ -216,7 +216,7 @@ open_or_create_log_file(
name, OS_FILE_OPEN, OS_FILE_AIO, &ret);
if (!ret) {
fprintf(stderr,
"Innobase: Error in opening %s\n", name);
"InnoDB: Error in opening %s\n", name);
return(DB_ERROR);
}
......@@ -227,8 +227,8 @@ open_or_create_log_file(
if (size != UNIV_PAGE_SIZE * srv_log_file_size
|| size_high != 0) {
fprintf(stderr,
"Innobase: Error: log file %s is of different size\n"
"Innobase: than specified in the .cnf file!\n", name);
"InnoDB: Error: log file %s is of different size\n"
"InnoDB: than specified in the .cnf file!\n", name);
return(DB_ERROR);
}
......@@ -236,16 +236,16 @@ open_or_create_log_file(
*log_file_created = TRUE;
fprintf(stderr,
"Innobase: Log file %s did not exist: new to be created\n",
"InnoDB: Log file %s did not exist: new to be created\n",
name);
printf("Innobase: Setting log file %s size to %lu\n",
printf("InnoDB: Setting log file %s size to %lu\n",
name, UNIV_PAGE_SIZE * srv_log_file_size);
ret = os_file_set_size(name, files[i],
UNIV_PAGE_SIZE * srv_log_file_size, 0);
if (!ret) {
fprintf(stderr,
"Innobase: Error in creating %s: probably out of disk space\n",
"InnoDB: Error in creating %s: probably out of disk space\n",
name);
return(DB_ERROR);
......@@ -335,7 +335,7 @@ open_or_create_data_files(
if (os_file_get_last_error() !=
OS_FILE_ALREADY_EXISTS) {
fprintf(stderr,
"Innobase: Error in creating or opening %s\n",
"InnoDB: Error in creating or opening %s\n",
name);
return(DB_ERROR);
......@@ -343,9 +343,9 @@ open_or_create_data_files(
if (one_created) {
fprintf(stderr,
"Innobase: Error: data files can only be added at the end\n");
"InnoDB: Error: data files can only be added at the end\n");
fprintf(stderr,
"Innobase: of a tablespace, but data file %s existed beforehand.\n",
"InnoDB: of a tablespace, but data file %s existed beforehand.\n",
name);
return(DB_ERROR);
}
......@@ -355,7 +355,7 @@ open_or_create_data_files(
if (!ret) {
fprintf(stderr,
"Innobase: Error in opening %s\n", name);
"InnoDB: Error in opening %s\n", name);
return(DB_ERROR);
}
......@@ -366,8 +366,8 @@ open_or_create_data_files(
if (size != UNIV_PAGE_SIZE * srv_data_file_sizes[i]
|| size_high != 0) {
fprintf(stderr,
"Innobase: Error: data file %s is of different size\n"
"Innobase: than specified in the .cnf file!\n", name);
"InnoDB: Error: data file %s is of different size\n"
"InnoDB: than specified in the .cnf file!\n", name);
return(DB_ERROR);
}
......@@ -382,26 +382,26 @@ open_or_create_data_files(
if (i > 0) {
fprintf(stderr,
"Innobase: Data file %s did not exist: new to be created\n", name);
"InnoDB: Data file %s did not exist: new to be created\n", name);
} else {
fprintf(stderr,
"Innobase: The first specified data file %s did not exist:\n"
"Innobase: a new database to be created!\n", name);
"InnoDB: The first specified data file %s did not exist:\n"
"InnoDB: a new database to be created!\n", name);
*create_new_db = TRUE;
}
printf("Innobase: Setting file %s size to %lu\n",
printf("InnoDB: Setting file %s size to %lu\n",
name, UNIV_PAGE_SIZE * srv_data_file_sizes[i]);
printf(
"Innobase: Database physically writes the file full: wait...\n");
"InnoDB: Database physically writes the file full: wait...\n");
ret = os_file_set_size(name, files[i],
UNIV_PAGE_SIZE * srv_data_file_sizes[i], 0);
if (!ret) {
fprintf(stderr,
"Innobase: Error in creating %s: probably out of disk space\n", name);
"InnoDB: Error in creating %s: probably out of disk space\n", name);
return(DB_ERROR);
}
......@@ -503,7 +503,7 @@ test_measure_cont(
}
/********************************************************************
Starts Innobase and creates a new database if database files
Starts InnoDB and creates a new database if database files
are not found and the user wants. Server parameters are
read from a file of name "srv_init" in the ib_home directory. */
......@@ -606,7 +606,7 @@ innobase_start_or_create_for_mysql(void)
&sum_of_new_sizes);
if (err != DB_SUCCESS) {
fprintf(stderr, "Innobase: Could not open data files\n");
fprintf(stderr, "InnoDB: Could not open data files\n");
return((int) err);
}
......@@ -634,12 +634,12 @@ innobase_start_or_create_for_mysql(void)
if ((log_opened && create_new_db)
|| (log_opened && log_created)) {
fprintf(stderr,
"Innobase: Error: all log files must be created at the same time.\n"
"Innobase: If you want bigger or smaller log files,\n"
"Innobase: shut down the database and make sure there\n"
"Innobase: were no errors in shutdown.\n"
"Innobase: Then delete the existing log files. Edit the .cnf file\n"
"Innobase: and start the database again.\n");
"InnoDB: Error: all log files must be created at the same time.\n"
"InnoDB: If you want bigger or smaller log files,\n"
"InnoDB: shut down the database and make sure there\n"
"InnoDB: were no errors in shutdown.\n"
"InnoDB: Then delete the existing log files. Edit the .cnf file\n"
"InnoDB: and start the database again.\n");
return(DB_ERROR);
}
......@@ -652,9 +652,9 @@ innobase_start_or_create_for_mysql(void)
if (ut_dulint_cmp(max_flushed_lsn, min_flushed_lsn) != 0
|| max_arch_log_no != min_arch_log_no) {
fprintf(stderr,
"Innobase: Cannot initialize created log files because\n"
"Innobase: data files were not in sync with each other\n"
"Innobase: or the data files are corrupt./n");
"InnoDB: Cannot initialize created log files because\n"
"InnoDB: data files were not in sync with each other\n"
"InnoDB: or the data files are corrupt./n");
return(DB_ERROR);
}
......@@ -662,11 +662,11 @@ innobase_start_or_create_for_mysql(void)
if (ut_dulint_cmp(max_flushed_lsn, ut_dulint_create(0, 1000))
< 0) {
fprintf(stderr,
"Innobase: Cannot initialize created log files because\n"
"Innobase: data files are corrupt, or new data files were\n"
"Innobase: created when the database was started previous\n"
"Innobase: time but the database was not shut down\n"
"Innobase: normally after that.\n");
"InnoDB: Cannot initialize created log files because\n"
"InnoDB: data files are corrupt, or new data files were\n"
"InnoDB: created when the database was started previous\n"
"InnoDB: time but the database was not shut down\n"
"InnoDB: normally after that.\n");
return(DB_ERROR);
}
......@@ -694,7 +694,7 @@ innobase_start_or_create_for_mysql(void)
} else if (srv_archive_recovery) {
fprintf(stderr,
"Innobase: Starting archive recovery from a backup...\n");
"InnoDB: Starting archive recovery from a backup...\n");
err = recv_recovery_from_archive_start(
min_flushed_lsn,
......@@ -776,7 +776,7 @@ innobase_start_or_create_for_mysql(void)
/* Create the thread which watches the timeouts for lock waits */
os_thread_create(&srv_lock_timeout_monitor_thread, NULL,
thread_ids + 2 + SRV_MAX_N_IO_THREADS);
fprintf(stderr, "Innobase: Started\n");
fprintf(stderr, "InnoDB: Started\n");
srv_was_started = TRUE;
srv_is_being_started = FALSE;
......@@ -789,7 +789,7 @@ innobase_start_or_create_for_mysql(void)
}
/********************************************************************
Shuts down the Innobase database. */
Shuts down the InnoDB database. */
int
innobase_shutdown_for_mysql(void)
......@@ -799,7 +799,7 @@ innobase_shutdown_for_mysql(void)
if (!srv_was_started) {
if (srv_is_being_started) {
fprintf(stderr,
"Innobase: Warning: shutting down not properly started database\n");
"InnoDB: Warning: shutting down not properly started database\n");
}
return(DB_SUCCESS);
}
......
-- require r/have_innobase.require
show variables like "have_innobase";
-- require r/have_innodb.require
show variables like "have_innodb";
......@@ -193,7 +193,7 @@ then
fi
if $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables \
--basedir=$basedir --datadir=$ldata --skip-innobase --skip-bdb --skip-gemini << END_OF_DATA
--basedir=$basedir --datadir=$ldata --skip-innodb --skip-bdb --skip-gemini << END_OF_DATA
use mysql;
$c_d
$i_d
......
......@@ -20,9 +20,31 @@ TZ=GMT-3; export TZ # for UNIX_TIMESTAMP tests to work
PATH=/bin:/usr/bin:/usr/local/bin:/usr/bsd:/usr/X11R6/bin
# Standard functions
which ()
{
DIRS=`echo $PATH | tr ":" " "`
for file
do
for dir in $DIRS
do
if test -f $dir/$file
then
echo "$dir/$file"
continue 2
fi
done
echo "which: no $file in ($PATH)"
exit 1
done
}
# No paths below as we can't be sure where the program is!
BASENAME=`which basename | head -1`
DIFF=`which diff | head -1`
CAT=cat
CUT=cut
TAIL=tail
......@@ -268,6 +290,20 @@ prompt_user ()
read unused
}
show_failed_diff ()
{
reject_file=r/$1.reject
result_file=r/$1.result
if [ -x "$DIFF" ] && [ -f $reject_file ]
then
echo "Below are the diffs between actual and expected results:"
echo "-------------------------------------------------------"
$DIFF -u $result_file $reject_file
echo "-------------------------------------------------------"
echo "Please e-mail the above, along with the output of mysqlbug"
echo "and any other relevant info to bugs@lists.mysql.com"
fi
}
error () {
$ECHO "Error: $1"
......@@ -399,7 +435,7 @@ start_master()
--core \
--tmpdir=$MYSQL_TMP_DIR \
--language=english \
--innobase_data_file_path=ibdata1:50M \
--innodb_data_file_path=ibdata1:50M \
$SMALL_SERVER \
$EXTRA_MASTER_OPT $EXTRA_MASTER_MYSQLD_OPT"
if [ x$DO_DDD = x1 ]
......@@ -454,7 +490,7 @@ start_slave()
--core \
--tmpdir=$MYSQL_TMP_DIR \
--language=english \
--skip-innobase \
--skip-innodb \
$SMALL_SERVER \
$EXTRA_SLAVE_OPT $EXTRA_SLAVE_MYSQLD_OPT"
if [ x$DO_DDD = x1 ]
......@@ -660,6 +696,7 @@ run_testcase ()
$ECHO "$RES$RES_SPACE [ fail ]"
$ECHO
error_is
show_failed_diff $tname
$ECHO
if [ x$FORCE != x1 ] ; then
$ECHO "Aborting. To continue, re-run with '--force'."
......
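The master in the test harness above is now started with --innodb_data_file_path=ibdata1:50M, while the slave runs with --skip-innodb. As a hedged illustration of the option renamed here (file names and sizes are invented, and the multi-file form joined with ';' is assumed to behave the same way in this version):

# sketch only: options as the test harness passes them to mysqld
extra_master_opts="--innodb_data_file_path=ibdata1:50M"   # one 50M tablespace file
extra_slave_opts="--skip-innodb"                          # InnoDB compiled in but disabled
# assumed multi-file form, not used by the test suite itself:
#   --innodb_data_file_path="ibdata1:50M;ibdata2:50M"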
......@@ -6,8 +6,8 @@ sum(length(word))
(@id := id) - id
0
Master_Host Master_User Master_Port Connect_retry Log_File Pos Slave_Running Replicate_do_db Replicate_ignore_db Last_errno Last_error Skip_counter
127.0.0.1 root 9306 1 master-bin.001 1729137 No 1053 Slave: query ' update t1 set n = n + 1' partially completed on the master and was aborted. There is a chance that your master is inconsistent at this point. If you are sure that your master is ok, run this query manually on the slave and then restart the slave with SET SQL_SLAVE_SKIP_COUNTER=1; SLAVE START; 0
127.0.0.1 root 9306 1 master-bin.001 939 No 1053 Slave: query ' update t1 set n = n + get_lock('crash_lock', 2)' partially completed on the master and was aborted. There is a chance that your master is inconsistent at this point. If you are sure that your master is ok, run this query manually on the slave and then restart the slave with SET SQL_SLAVE_SKIP_COUNTER=1; SLAVE START; 0
count(*)
30000
10
n
3456
drop table if exists t1;
create table t1(n int not null, key(n)) delay_key_write = 1;
let $1=10000;
let $1=100;
while ($1)
{
eval insert into t1 values($1);
......
-- source include/have_innobase.inc
-- source include/have_innodb.inc
#
# Small basic test with ignore
......
......@@ -30,7 +30,7 @@ reset slave;
connection master;
drop table if exists t1,t2;
create table t1(n int);
let $1=30000;
let $1=10;
while ($1)
{
eval insert into t1 values($1);
......@@ -39,10 +39,17 @@ while ($1)
create table t2(id int);
insert into t2 values(connection_id());
save_master_pos;
send update t1 set n = n + 1;
connection master1;
#avoid generating result
create temporary table t1_temp(n int);
insert into t1_temp select get_lock('crash_lock', 1) from t2;
connection master;
send update t1 set n = n + get_lock('crash_lock', 2);
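# note: 'crash_lock' is presumably still held by master1 (get_lock() above), so each
# row of this update blocks ~2 seconds on get_lock('crash_lock', 2); that keeps the
# statement running long enough to be killed below while only partially completed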
connection master1;
sleep 2;
select (@id := id) - id from t2;
sleep 0.1;
kill @id;
drop table t2;
connection master;
......
......@@ -286,7 +286,7 @@ fi
echo "Installing all prepared tables"
if eval "$execdir/mysqld $defaults --bootstrap --skip-grant-tables \
--basedir=$basedir --datadir=$ldata --skip-innobase --skip-gemini --skip-bdb $args" << END_OF_DATA
--basedir=$basedir --datadir=$ldata --skip-innodb --skip-gemini --skip-bdb $args" << END_OF_DATA
use mysql;
$c_d
$i_d
......
......@@ -9,7 +9,7 @@ use DBI;
=head1 NAME
mysqlhotcopy - fast on-line hot-backup utility for local MySQL databases
mysqlhotcopy - fast on-line hot-backup utility for local MySQL databases and tables
=head1 SYNOPSIS
......@@ -36,7 +36,7 @@ WARNING: THIS IS VERY MUCH A FIRST-CUT ALPHA. Comments/patches welcome.
# Documentation continued at end of file
my $VERSION = "1.11";
my $VERSION = "1.12";
my $opt_tmpdir = $ENV{TMPDIR} || "/tmp";
......@@ -44,7 +44,7 @@ my $OPTIONS = <<"_OPTIONS";
$0 Ver $VERSION
Usage: $0 db_name [new_db_name | directory]
Usage: $0 db_name[./table_regex/] [new_db_name | directory]
-?, --help display this helpscreen and exit
-u, --user=# user for database login if not current user
......@@ -52,9 +52,9 @@ Usage: $0 db_name [new_db_name | directory]
-P, --port=# port to use when connecting to local server
-S, --socket=# socket to use when connecting to local server
--allowold don't abort if target already exists (rename it _old)
--keepold don't delete previous (now renamed) target when done
--noindices don't include full index files in copy
--allowold don\'t abort if target already exists (rename it _old)
--keepold don\'t delete previous (now renamed) target when done
--noindices don\'t include full index files in copy
--method=# method for copy (only "cp" currently supported)
-q, --quiet be silent except for errors
......@@ -69,7 +69,7 @@ Usage: $0 db_name [new_db_name | directory]
--resetslave reset the master.info once all tables are locked
--tmpdir=# temporary directory (instead of $opt_tmpdir)
Try 'perldoc $0 for more complete documentation'
Try \'perldoc $0 for more complete documentation\'
_OPTIONS
sub usage {
......@@ -186,18 +186,20 @@ $datadir =~ s:/$::;
# --- get target path ---
my ($tgt_dirname, $to_other_database);
$to_other_database=0;
if ($tgt_name =~ m:^\w+$: && @db_desc <= 1)
if (defined($tgt_name) && $tgt_name =~ m:^\w+$: && @db_desc <= 1)
{
$tgt_dirname = "$datadir/$tgt_name";
$to_other_database=1;
}
elsif ($tgt_name =~ m:/: || $tgt_name eq '.') {
elsif (defined($tgt_name) && ($tgt_name =~ m:/: || $tgt_name eq '.')) {
$tgt_dirname = $tgt_name;
}
elsif ( $opt{suffix} ) {
print "copy suffix $opt{suffix}\n" unless $opt{quiet};
print "Using copy suffix '$opt{suffix}'\n" unless $opt{quiet};
}
else {
else
{
$tgt_name="" if (!defined($tgt_name));
die "Target '$tgt_name' doesn't look like a database name or directory path.\n";
}
......@@ -277,7 +279,7 @@ foreach my $rdb ( @db_desc ) {
my @targets = ();
if (length $tgt_name ) {
if (defined($tgt_name) && length $tgt_name ) {
# explicit destination directory specified
# GNU `cp -r` error message
......@@ -329,8 +331,11 @@ if ($opt{method} =~ /^cp\b/)
push @existing, $rdb->{target} if ( -d $rdb->{target} );
}
die "Can't hotcopy to '", join( "','", @existing ), "' because already exist and --allowold option not given.\n"
if ( @existing && !$opt{allowold} );
if ( @existing && !$opt{allowold} )
{
$dbh->disconnect();
die "Can't hotcopy to '", join( "','", @existing ), "' because directory\nalready exist and the --allowold option was not given.\n"
}
}
retire_directory( @existing ) if ( @existing );
......@@ -778,7 +783,7 @@ not copied by the previous subsets.
newdb is either another not existing database or a full path to a directory
where we can create a directory 'db'
Add option to lock each table in turn for people who don't need
Add option to lock each table in turn for people who don\'t need
cross-table integrity.
Add option to FLUSH STATUS just before UNLOCK TABLES.
......
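The usage line above now advertises a db_name[./table_regex/] form, so a hot copy can be restricted to tables whose names match a Perl regex. A minimal usage sketch (database name, regex, and target directory are invented for illustration; the options shown are the ones listed in the usage text):

# copy only tables whose names start with "log_" from database "shop"
mysqlhotcopy --allowold shop./^log_/ /backups
# whole database, keeping the previously renamed _old copy around
mysqlhotcopy --allowold --keepold shop /backups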
......@@ -52,6 +52,7 @@ parse_arguments() {
--core-file-size=*) core_file_size=`echo "$arg" | sed -e "s;--core_file_size=;;"` ;;
--timezone=*) TZ=`echo "$arg" | sed -e "s;--timezone=;;"` ; export TZ; ;;
--mysqld=*) MYSQLD=`echo "$arg" | sed -e "s;--mysqld=;;"` ;;
--mysqld-version=*) MYSQLD=mysqld-`echo "$arg" | sed -e "s;--mysqld-version=;;"` ;;
*)
if test -n "$pick_args"
then
......@@ -92,7 +93,14 @@ fi
MYSQL_UNIX_PORT=${MYSQL_UNIX_PORT:-@MYSQL_UNIX_ADDR@}
MYSQL_TCP_PORT=${MYSQL_TCP_PORT:-@MYSQL_TCP_PORT@}
user=@MYSQLD_USER@
MYSQLD=mysqld
# Use the mysqld-max binary by default if the user doesn't specify a binary
if test -x $ledir/mysqld-max
then
MYSQLD=mysqld-max
else
MYSQLD=mysqld
fi
# these rely on $DATADIR by default, so we'll set them later on
pid_file=
......
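Two behavioural changes to safe_mysqld are visible above: a new --mysqld-version=SUFFIX argument that is rewritten into a binary name of the form mysqld-SUFFIX, and a default that prefers an executable mysqld-max in $ledir when no binary is named. A small sketch of the argument rewriting, assuming the same sed expression as in the script (the suffix value here is only an example):

arg="--mysqld-version=max"
MYSQLD=mysqld-`echo "$arg" | sed -e "s;--mysqld-version=;;"`
echo $MYSQLD        # prints: mysqld-max
# with no --mysqld or --mysqld-version given, the script now falls back to
# $ledir/mysqld-max if that file is executable, and to plain mysqld otherwise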
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 2:22:09
ATIS table test
Creating tables
Time for create_table (28): 7 wallclock secs ( 6.93 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (9768): 12 wallclock secs (11.97 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
Time for select_simple_join (500): 7 wallclock secs ( 7.47 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_join (200): 51 wallclock secs (50.15 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_distinct (800): 33 wallclock secs (33.34 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (2800): 33 wallclock secs (33.61 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 1 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 144 wallclock secs (143.53 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Benchmark DBD suite: 2.7
Date of test: 2000-03-15 1:51:13
Running tests on: Windows 98 [Version 4.10.1998]
Arguments:
Comments: AMD K6, 400mz, 128M, 16M key_cache
Limits from:
Server version: MySQL 3.23.13a alpha
alter-table: Total time: 1855 wallclock secs (1854.89 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
ATIS: Total time: 144 wallclock secs (143.53 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
big-tables: Total time: 151 wallclock secs (150.66 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
connect: Total time: 369 wallclock secs (368.94 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
create: Total time: 5141 wallclock secs (5140.97 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
insert: Estimated total time: 8358 wallclock secs (8356.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
select: Total time: 1294 wallclock secs (1293.83 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
wisconsin: Total time: 63 wallclock secs (62.40 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
All 8 tests executed successfully
Tests with estimated time have a + at end of line
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 1046 1046.38 0.00 0.00 992
alter_table_drop 773 772.97 0.00 0.00 496
connect 52 51.85 0.00 0.00 10000
connect+select_1_row 72 71.95 0.00 0.00 10000
connect+select_simple 67 66.90 0.00 0.00 10000
count 55 54.87 0.00 0.00 100
count_on_key 640 639.71 0.00 0.00 50100
create+drop 1433 1433.28 0.00 0.00 10000
create_MANY_tables 1510 1510.83 0.00 0.00 5000
create_index 17 16.70 0.00 0.00 8
create_key+drop 1457 1456.57 0.00 0.00 10000
create_table 8 7.92 0.00 0.00 31
delete_big 23 22.25 0.00 0.00 13
delete_big_many_keys 112 112.11 0.00 0.00 2
delete_key 0 0.82 0.00 0.00 500
drop_index 17 16.64 0.00 0.00 8
drop_table 1 0.06 0.00 0.00 28
drop_table_when_MANY_tables 160 159.83 0.00 0.00 5000
insert 510 508.11 0.00 0.00 350768
insert_duplicates 91 91.06 0.00 0.00 300000
insert_key 364 363.72 0.00 0.00 100000
insert_many_fields 25 24.71 0.00 0.00 2000
min_max 33 32.90 0.00 0.00 60
min_max_on_key 339 339.56 0.00 0.00 73000
multiple_value_insert 11 11.64 0.00 0.00 100000
order_by 249 248.87 0.00 0.00 10
order_by_key 175 174.66 0.00 0.00 10
outer_join 146 145.66 0.00 0.00 10
outer_join_found 144 144.40 0.00 0.00 10
outer_join_not_found 606 605.55 0.00 0.00 500 +
outer_join_on_key 63 63.33 0.00 0.00 10
select_1_row 16 15.99 0.00 0.00 10000
select_2_rows 19 18.89 0.00 0.00 10000
select_big 304 306.16 0.00 0.00 10080
select_diff_key 241 240.63 0.00 0.00 500
select_distinct 33 33.34 0.00 0.00 800
select_group 91 91.83 0.00 0.00 2911
select_group_when_MANY_tables 581 580.46 0.00 0.00 5000
select_join 51 50.15 0.00 0.00 200
select_key 540 540.35 0.00 0.00 200000
select_key_prefix 542 542.23 0.00 0.00 200000
select_many_fields 124 124.24 0.00 0.00 2000
select_range 423 423.21 0.00 0.00 25420
select_range_prefix 66 66.62 0.00 0.00 25010
select_simple 12 12.20 0.00 0.00 10000
select_simple_join 7 7.47 0.00 0.00 500
update_big 84 83.33 0.00 0.00 500
update_of_key 59 59.21 0.00 0.00 756
update_of_key_big 38 38.07 0.00 0.00 501
update_with_key 432 432.20 0.00 0.00 100000
wisc_benchmark 17 17.08 0.00 0.00 114
TOTALS 13879 13879.47 0.00 0.00 1642948 +
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 1:51:14
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 1 wallclock secs ( 1.09 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_add (992): 1046 wallclock secs (1046.38 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_index (8): 17 wallclock secs (16.70 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 17 wallclock secs (16.64 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_drop (496): 773 wallclock secs (772.97 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1855 wallclock secs (1854.89 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 2:24:33
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 55 wallclock secs (55.36 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 69 wallclock secs (68.88 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 6 wallclock secs ( 6.20 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 19 wallclock secs (18.51 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 151 wallclock secs (150.66 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 2:27:04
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 52 wallclock secs (51.85 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 67 wallclock secs (66.90 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
Time for select_simple (10000): 12 wallclock secs (12.20 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 72 wallclock secs (71.95 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 16 wallclock secs (15.99 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 19 wallclock secs (18.89 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 130 wallclock secs (130.56 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 369 wallclock secs (368.94 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 2:33:13
Testing the speed of creating and dropping tables
Testing with 5000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (5000): 1510 wallclock secs (1510.83 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
Time to select_group_when_MANY_tables (5000): 581 wallclock secs (580.46 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
Time for drop_table_when_MANY_tables (5000): 160 wallclock secs (159.83 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
Time for create+drop (10000): 1433 wallclock secs (1433.28 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_key+drop (10000): 1457 wallclock secs (1456.57 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 5141 wallclock secs (5140.97 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 3:59:00
Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 438 wallclock secs (437.15 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert of duplicates
Time for insert_duplicates (300000): 91 wallclock secs (91.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 173 wallclock secs (173.79 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_key (10:3000000): 175 wallclock secs (174.66 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by (10:3000000): 249 wallclock secs (248.87 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_diff_key (500:1000): 241 wallclock secs (240.63 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_prefix (5010:42084): 33 wallclock secs (33.50 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (5010:42084): 34 wallclock secs (33.67 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key_prefix (200000): 542 wallclock secs (542.23 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key (200000): 540 wallclock secs (540.35 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 33 wallclock secs (33.12 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (20000:43500): 33 wallclock secs (33.45 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (111): 58 wallclock secs (58.22 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (3000): 7 wallclock secs ( 6.87 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max (60): 33 wallclock secs (32.90 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (100): 52 wallclock secs (52.23 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count (100): 55 wallclock secs (54.87 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
Time for update_of_key (500): 1 wallclock secs ( 0.99 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for update_of_key_big (501): 38 wallclock secs (38.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update with key
Time for update_with_key (100000): 432 wallclock secs (432.20 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of all rows
Time for update_big (500): 84 wallclock secs (83.33 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 63 wallclock secs (63.33 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 146 wallclock secs (145.66 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 144 wallclock secs (144.40 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Note: Query took longer than time-limit: 600
Estimating end time based on:
75 queries in 74 loops of 500 loops took 606 seconds
Estimated time for outer_join_not_found (500:506): 606 wallclock secs (605.55 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (500): 0 wallclock secs ( 0.82 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for delete_big (12): 22 wallclock secs (21.59 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 364 wallclock secs (363.72 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys
Time for update_of_key (256): 58 wallclock secs (58.22 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting everything from table
Time for delete_big_many_keys (2): 112 wallclock secs (112.11 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting 100000 rows with multiple values
Time for multiple_value_insert (100000): 11 wallclock secs (11.64 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Estimated total time: 8358 wallclock secs (8356.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 5:20:12
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 16 wallclock secs (15.32 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 1.81 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (410:1057904): 356 wallclock secs (356.09 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (70000): 332 wallclock secs (332.69 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (50000): 588 wallclock secs (587.48 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1294 wallclock secs (1293.83 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.13a alpha' at 2000-03-15 5:41:46
Wisconsin benchmark test
Time for create_table (3): 1 wallclock secs ( 0.99 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 44 wallclock secs (43.67 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to delete_big (1): 1 wallclock secs ( 0.66 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 17 wallclock secs (17.08 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 63 wallclock secs (62.40 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.21.34' at 1999-02-28 15:42:21
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Inserting data
Time to insert (9768): 3 wallclock secs ( 0.90 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Retrieving data
Time for select_simple_join (500): 3 wallclock secs ( 0.95 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_join (200): 26 wallclock secs ( 6.04 usr 3.37 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_distinct (800): 17 wallclock secs ( 2.46 usr 1.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_group (2700): 15 wallclock secs ( 2.27 usr 0.57 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Total time: 64 wallclock secs (12.66 usr 5.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 8:33:50
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (9768): 4 wallclock secs ( 1.34 usr 0.15 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
Time for select_simple_join (500): 3 wallclock secs ( 1.40 usr 0.18 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_join (200): 23 wallclock secs ( 9.91 usr 1.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_distinct (800): 19 wallclock secs ( 3.71 usr 0.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (2700): 16 wallclock secs ( 3.35 usr 0.37 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 65 wallclock secs (19.72 usr 2.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 3:11:25
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (9768): 2 wallclock secs ( 0.45 usr 0.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
Time for select_simple_join (500): 2 wallclock secs ( 0.59 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_join (200): 13 wallclock secs ( 4.26 usr 2.21 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_distinct (800): 11 wallclock secs ( 1.69 usr 0.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (2800): 12 wallclock secs ( 1.56 usr 0.37 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 40 wallclock secs ( 8.55 usr 3.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 6:39:08
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (9768): 3 wallclock secs ( 0.38 usr 0.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
Time for select_simple_join (500): 2 wallclock secs ( 0.68 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_join (100): 2 wallclock secs ( 0.57 usr 0.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key_prefix_join (100): 13 wallclock secs ( 4.13 usr 1.96 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_distinct (800): 11 wallclock secs ( 1.84 usr 0.70 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (2800): 13 wallclock secs ( 1.55 usr 0.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 44 wallclock secs ( 9.18 usr 4.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.37' at 2001-04-13 1:03:13
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (9768): 4 wallclock secs ( 0.59 usr 0.35 sys + 0.00 cusr 0.00 csys = 0.94 CPU)
Retrieving data
Time for select_simple_join (500): 2 wallclock secs ( 0.58 usr 0.28 sys + 0.00 cusr 0.00 csys = 0.86 CPU)
Time for select_join (100): 4 wallclock secs ( 0.50 usr 0.23 sys + 0.00 cusr 0.00 csys = 0.73 CPU)
Time for select_key_prefix_join (100): 13 wallclock secs ( 4.11 usr 2.03 sys + 0.00 cusr 0.00 csys = 6.14 CPU)
Time for select_distinct (800): 15 wallclock secs ( 1.57 usr 0.72 sys + 0.00 cusr 0.00 csys = 2.29 CPU)
Time for select_group (2800): 20 wallclock secs ( 1.51 usr 0.44 sys + 0.00 cusr 0.00 csys = 1.95 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 58 wallclock secs ( 8.87 usr 4.05 sys + 0.00 cusr 0.00 csys = 12.92 CPU)
Testing server 'MySQL 3.21.33' at 1998-08-21 11:28:22
ATIS table test
Creating tables
Time for create_table (28): 0 secs ( 0.01 usr 0.01 sys = 0.02 cpu)
Inserting data
Time to insert (9768): 5 secs ( 0.48 usr 0.75 sys = 1.23 cpu)
Retrieving data
Time for select_simple_join (500): 2 secs ( 0.62 usr 1.00 sys = 1.62 cpu)
Time for select_join (200): 31 secs ( 2.65 usr 11.18 sys = 13.83 cpu)
Time for select_distinct (800): 20 secs ( 1.11 usr 2.93 sys = 4.04 cpu)
Time for select_group (2700): 17 secs ( 0.65 usr 2.51 sys = 3.16 cpu)
Removing tables
Time to drop_table (28): 0 secs ( 0.00 usr 0.01 sys = 0.01 cpu)
Total time: 75 secs ( 5.52 usr 18.40 sys = 23.92 cpu)
Benchmark DBD suite: 2.0b
Date of test: 1999-02-28 15:42:20
Running tests on: Linux 2.2.1 i686
Arguments:
Comments: pentiumpro 400mz x2, 256M, SCSI, gcc 2.9 compiled, key_buffer=1M
Limits from:
Server version: MySQL 3.21.34
ATIS: Total time: 64 wallclock secs (12.66 usr 5.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
alter-table: Total time: 815 wallclock secs ( 0.61 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
big-tables: Total time: 188 wallclock secs (10.99 usr 10.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
connect: Total time: 148 wallclock secs (66.47 usr 42.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
create: Total time: 11 wallclock secs ( 1.68 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
insert: Total time: 1971 wallclock secs (362.50 usr 103.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
select: Estimated total time: 1794 wallclock secs (79.34 usr 14.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
wisconsin: Total time: 20 wallclock secs ( 4.70 usr 2.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
All 8 tests executed successfully
Tests with estimated time have a + at end of line
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 476 0.35 0.09 0.00 992
alter_table_drop 338 0.17 0.03 0.00 496
connect 30 14.51 10.85 0.00 10000
connect+select 34 18.27 7.11 0.00 10000
count 61 0.09 0.03 0.00 100
count_on_key 640 27.18 3.47 0.00 50100 +
create+drop 1 0.37 0.06 0.00 1000
create_index 0 0.00 0.00 0.00 8
create_key+drop 3 0.77 0.07 0.00 1000
create_table 5 0.29 0.06 0.00 1031
delete_big 206 0.00 0.00 0.00 15
delete_key 12 0.02 0.02 0.00 500
drop_index 0 0.00 0.00 0.00 8
drop_table 1 0.08 0.02 0.00 1028
insert 149 34.57 12.16 0.00 350768
insert_duplicates 26 6.84 3.72 0.00 300000
insert_key 157 14.40 3.85 0.00 100000
insert_many_fields 82 0.57 0.12 0.00 2000
min_max 23 0.04 0.00 0.00 60
min_max_on_key 1260 40.08 4.69 0.00 73000 +
order_by 71 28.62 19.26 0.00 10
order_by_key 71 28.30 20.45 0.00 10
select 7 1.71 1.41 0.00 20000
select_big 97 44.76 25.37 0.00 10080
select_distinct 17 2.46 1.09 0.00 800
select_group 96 2.58 0.70 0.00 3811
select_join 26 6.04 3.37 0.00 200
select_key 213 109.13 13.71 0.00 200000
select_key_prefix 247 94.75 12.42 0.00 200000
select_many_fields 106 10.40 10.63 0.00 2000
select_range 349 21.73 7.72 0.00 25420
select_range_prefix 38 10.18 1.70 0.00 25010
select_simple 34 15.95 13.30 0.00 20000
select_simple_join 3 0.95 0.31 0.00 500
update_key 0 0.05 0.03 0.00 500
update_key_big 52 0.04 0.01 0.00 501
update_of_key 72 0.03 0.03 0.00 256
wisc_benchmark 6 2.48 1.08 0.00 114
TOTALS 5009 538.76 178.94 0.00 1411318 ++
Benchmark DBD suite: 2.4
Date of test: 1999-09-17 8:33:49
Running tests on: Linux 2.2.10 i686
Arguments: --force
Comments:
Limits from:
Server version: MySQL 3.23.3 alpha
ATIS: Total time: 65 wallclock secs (19.72 usr 2.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
alter-table: Total time: 514 wallclock secs ( 0.65 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
big-tables: Total time: 50 wallclock secs (18.30 usr 6.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
connect: Total time: 120 wallclock secs (66.02 usr 10.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
create: Total time: 9 wallclock secs ( 2.21 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
insert: Estimated total time: 6039 wallclock secs (648.24 usr 76.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
select: Total time: 1209 wallclock secs (110.89 usr 8.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
wisconsin: Total time: 23 wallclock secs ( 7.83 usr 1.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
All 8 tests executed successfully
Tests with estimated time have a + at end of line
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 280 0.37 0.03 0.00 992
alter_table_drop 222 0.16 0.01 0.00 496
connect 18 13.36 1.13 0.00 10000
connect+select_1_row 23 14.47 1.60 0.00 10000
connect+select_simpl 21 14.34 1.42 0.00 10000
count 73 0.08 0.02 0.00 100
count_on_key 648 38.95 2.36 0.00 50100
create+drop 1 0.63 0.01 0.00 1000
create_index 5 0.00 0.00 0.00 8
create_key+drop 2 0.78 0.07 0.00 1000
create_table 4 0.46 0.07 0.00 1031
delete_big 21 0.00 0.00 0.00 13
delete_big_many_keys 257 0.00 0.00 0.00 2
delete_key 1 0.12 0.00 0.00 500
drop_index 6 0.00 0.00 0.00 8
drop_table 1 0.15 0.05 0.00 1028
insert 173 53.61 9.85 0.00 350768
insert_duplicates 16 5.74 1.67 0.00 300000
insert_key 209 21.42 3.48 0.00 100000
insert_many_fields 13 0.88 0.09 0.00 2000
min_max 37 0.04 0.01 0.00 60
min_max_on_key 265 53.70 3.63 0.00 73000
multiple_value_inser 9 2.46 0.02 0.00 100000
order_by 90 46.67 8.04 0.00 10
order_by_key 69 46.84 7.93 0.00 10
outer_join 84 0.00 0.00 0.00 10
outer_join_found 82 0.01 0.00 0.00 10
outer_join_not_found 605 0.13 0.00 0.00 500 +
outer_join_on_key 65 0.02 0.00 0.00 10
select_1_row 3 1.05 0.33 0.00 10000
select_2_rows 4 1.26 0.35 0.00 10000
select_big 118 67.93 13.05 0.00 10080
select_diff_key 305 0.55 0.02 0.00 500
select_distinct 19 3.71 0.50 0.00 800
select_group 75 3.65 0.46 0.00 3811
select_join 23 9.91 1.72 0.00 200
select_key 401 164.79 11.47 0.00 200000
select_key_prefix 406 175.76 12.40 0.00 200000
select_many_fields 37 17.41 6.17 0.00 2000
select_range 387 33.53 3.87 0.00 25420
select_range_prefix 42 15.23 1.30 0.00 25010
select_simple 2 0.74 0.31 0.00 10000
select_simple_join 3 1.40 0.18 0.00 500
update_big 34 0.00 0.00 0.00 500
update_of_key 61 0.11 0.06 0.00 756
update_of_key_big 29 0.08 0.00 0.00 501
update_with_key 381 57.20 11.84 0.00 100000
wisc_benchmark 7 3.42 0.58 0.00 114
TOTALS 5637 873.12 106.10 0.00 1612848 +
Benchmark DBD suite: 2.8
Date of test: 2000-06-01 11:07:08
Running tests on: Linux 2.2.14 i686
Arguments:
Comments: Pentium 2x550 MZ Xeon, 512M, IDE, 16M key_buffer
Limits from:
Server version: MySQL 3.23.17 alpha
alter-table: Total time: 359 wallclock secs ( 0.47 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
ATIS: Total time: 40 wallclock secs ( 8.55 usr 3.92 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
big-tables: Total time: 33 wallclock secs ( 8.89 usr 7.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
connect: Total time: 74 wallclock secs (30.09 usr 16.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
create: Total time: 419 wallclock secs (10.58 usr 3.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
insert: Total time: 1552 wallclock secs (263.84 usr 86.41 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
select: Total time: 1797 wallclock secs (126.08 usr 67.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
wisconsin: Total time: 15 wallclock secs ( 3.20 usr 1.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
All 8 tests executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 198.00 0.29 0.06 0.00 992
alter_table_drop 154.00 0.14 0.04 0.00 496
connect 12.00 6.38 2.82 0.00 10000
connect+select_1_row 15.00 6.57 3.78 0.00 10000
connect+select_simple 13.00 6.71 3.26 0.00 10000
count 42.00 0.10 0.00 0.00 100
count_distinct 110.00 0.78 0.10 0.00 1000
count_distinct_big 632.00 73.98 55.47 0.00 1020
count_distinct_group 75.00 1.20 0.44 0.00 1000
count_distinct_group_on_key 58.00 0.41 0.03 0.00 1000
count_distinct_group_on_key_parts 74.00 1.18 0.60 0.00 1000
count_group_on_key_parts 0.00 0.00 0.00 0.00 0
count_on_key 557.00 17.72 2.69 0.00 50100
create+drop 19.00 2.43 0.72 0.00 10000
create_MANY_tables 263.00 2.58 0.48 0.00 10000
create_index 4.00 0.00 0.00 0.00 8
create_key+drop 18.00 3.88 0.95 0.00 10000
create_table 0.00 0.00 0.00 0.00 31
delete_big 15.00 0.01 0.00 0.00 13
delete_big_many_keys 59.00 0.00 0.00 0.00 2
delete_key 3.00 0.73 0.34 0.00 10000
drop_index 3.00 0.01 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 22.00 0.59 0.42 0.00 10000
insert 109.00 20.53 9.93 0.00 350768
insert_duplicates 14.00 3.03 3.14 0.00 300000
insert_key 111.00 10.00 3.74 0.00 100000
insert_many_fields 10.00 0.45 0.11 0.00 2000
min_max 29.00 0.00 0.00 0.00 60
min_max_on_key 219.00 26.38 4.12 0.00 85000
multiple_value_insert 7.00 1.92 0.05 0.00 100000
order_by 47.00 20.41 15.62 0.00 10
order_by_key 30.00 20.22 10.11 0.00 10
outer_join 63.00 0.01 0.00 0.00 10
outer_join_found 60.00 0.00 0.00 0.00 10
outer_join_not_found 46.00 0.00 0.00 0.00 500
outer_join_on_key 51.00 0.00 0.00 0.00 10
select_1_row 2.00 0.54 0.38 0.00 10000
select_2_rows 3.00 0.57 0.44 0.00 10000
select_big 59.00 29.63 15.74 0.00 10080
select_diff_key 174.00 0.28 0.03 0.00 500
select_distinct 11.00 1.69 0.81 0.00 800
select_group 64.00 1.61 0.41 0.00 2911
select_group_when_MANY_tables 97.00 1.09 0.52 0.00 10000
select_join 13.00 4.26 2.21 0.00 200
select_key 128.00 66.75 11.61 0.00 200000
select_key_prefix 130.00 66.91 10.48 0.00 200000
select_many_fields 23.00 8.43 7.65 0.00 2000
select_range 208.00 14.67 5.30 0.00 25420
select_range_prefix 18.00 5.83 1.38 0.00 25010
select_simple 2.00 0.34 0.39 0.00 10000
select_simple_join 2.00 0.59 0.26 0.00 500
update_big 28.00 0.00 0.00 0.00 500
update_of_key 59.00 2.65 1.37 0.00 756
update_of_key_big 25.00 0.02 0.01 0.00 501
update_with_key 97.00 15.38 7.87 0.00 100000
wisc_benchmark 4.00 1.72 0.77 0.00 114
TOTALS 4289.00 451.60 186.65 0.00 1684468
Benchmark DBD suite: 2.12
Date of test: 2000-12-24 15:30:34
Running tests on: Linux 2.2.14-my-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 1G ram, key_buffer=16M
Limits from:
Server version: MySQL 3.23.29a gamma
ATIS: Total time: 44 wallclock secs ( 9.18 usr 4.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
alter-table: Total time: 468 wallclock secs ( 0.25 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
big-tables: Total time: 31 wallclock secs ( 8.67 usr 6.87 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
connect: Total time: 79 wallclock secs (34.39 usr 17.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
create: Total time: 126 wallclock secs (10.09 usr 3.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
insert: Total time: 2227 wallclock secs (451.81 usr 170.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
select: Total time: 1379 wallclock secs (61.35 usr 18.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
wisconsin: Total time: 16 wallclock secs ( 3.25 usr 1.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
All 8 tests executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 263.00 0.10 0.05 0.00 992
alter_table_drop 197.00 0.05 0.03 0.00 496
connect 12.00 7.18 2.61 0.00 10000
connect+select_1_row 15.00 7.75 3.29 0.00 10000
connect+select_simple 14.00 7.39 3.61 0.00 10000
count 47.00 0.05 0.00 0.00 100
count_distinct 119.00 0.66 0.17 0.00 2000
count_distinct_big 111.00 7.48 5.86 0.00 120
count_distinct_group 80.00 1.02 0.43 0.00 1000
count_distinct_group_on_key 60.00 0.40 0.08 0.00 1000
count_distinct_group_on_key_parts 81.00 0.79 0.41 0.00 1000
count_group_on_key_parts 54.00 1.00 0.47 0.00 1000
count_on_key 559.00 17.07 3.00 0.00 50100
create+drop 29.00 2.43 0.82 0.00 10000
create_MANY_tables 29.00 2.27 0.46 0.00 10000
create_index 4.00 0.00 0.00 0.00 8
create_key+drop 40.00 3.97 0.87 0.00 10000
create_table 0.00 0.00 0.00 0.00 31
delete_all 20.00 0.00 0.00 0.00 12
delete_all_many_keys 151.00 0.03 0.01 0.00 1
delete_big 0.00 0.00 0.00 0.00 1
delete_big_many_keys 151.00 0.03 0.01 0.00 128
delete_key 4.00 0.56 0.45 0.00 10000
drop_index 4.00 0.00 0.00 0.00 8
drop_table 0.00 0.01 0.00 0.00 28
drop_table_when_MANY_tables 12.00 0.63 0.43 0.00 10000
insert 131.00 22.93 14.02 0.00 350768
insert_duplicates 24.00 3.18 3.05 0.00 100000
insert_key 154.00 10.52 4.42 0.00 100000
insert_many_fields 9.00 0.44 0.07 0.00 2000
insert_select_1_key 7.00 0.00 0.00 0.00 1
insert_select_2_keys 9.00 0.00 0.00 0.00 1
min_max 32.00 0.01 0.00 0.00 60
min_max_on_key 238.00 27.34 5.17 0.00 85000
multiple_value_insert 9.00 1.88 0.06 0.00 100000
order_by_big 52.00 22.29 15.95 0.00 10
order_by_big_key 34.00 23.91 10.39 0.00 10
order_by_big_key2 33.00 22.58 10.06 0.00 10
order_by_big_key_desc 36.00 23.89 10.41 0.00 10
order_by_big_key_diff 50.00 22.07 16.43 0.00 10
order_by_big_key_prefix 32.00 22.21 10.50 0.00 10
order_by_key2_diff 5.00 1.12 0.07 0.00 500
order_by_key_prefix 3.00 1.10 0.38 0.00 500
order_by_range 5.00 1.14 0.41 0.00 500
outer_join 72.00 0.00 0.00 0.00 10
outer_join_found 67.00 0.00 0.01 0.00 10
outer_join_not_found 50.00 0.00 0.00 0.00 500
outer_join_on_key 57.00 0.01 0.00 0.00 10
select_1_row 2.00 0.45 0.57 0.00 10000
select_2_rows 3.00 0.59 0.61 0.00 10000
select_big 62.00 32.48 15.99 0.00 10080
select_column+column 3.00 0.58 0.68 0.00 10000
select_diff_key 191.00 0.33 0.03 0.00 500
select_distinct 11.00 1.84 0.70 0.00 800
select_group 67.00 1.60 0.52 0.00 2911
select_group_when_MANY_tables 16.00 0.79 0.62 0.00 10000
select_join 2.00 0.57 0.28 0.00 100
select_key 126.00 68.35 14.42 0.00 200000
select_key2 132.00 69.41 13.86 0.00 200000
select_key_prefix 134.00 71.19 14.71 0.00 200000
select_key_prefix_join 13.00 4.13 1.96 0.00 100
select_many_fields 22.00 8.22 6.80 0.00 2000
select_range 194.00 9.08 3.79 0.00 410
select_range_key2 18.00 6.36 1.64 0.00 25010
select_range_prefix 20.00 6.82 1.38 0.00 25010
select_simple 2.00 0.47 0.71 0.00 10000
select_simple_join 2.00 0.68 0.19 0.00 500
update_big 64.00 0.00 0.00 0.00 10
update_of_key 23.00 2.80 2.29 0.00 50000
update_of_key_big 33.00 0.11 0.03 0.00 501
update_of_primary_key_many_keys 65.00 0.02 0.01 0.00 256
update_with_key 113.00 17.01 12.17 0.00 300000
update_with_key_prefix 32.00 5.68 4.36 0.00 100000
wisc_benchmark 4.00 1.80 0.69 0.00 114
TOTALS 4519.00 578.85 223.47 0.00 2046247
Benchmark DBD suite: 2.12
Date of test: 2001-04-13 3:48:03
Running tests on: Linux 2.4.0-64GB-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 512M, key_buffer=16M
Limits from:
Server version: MySQL 3.23.37
ATIS: Total time: 58 wallclock secs ( 8.87 usr 4.05 sys + 0.00 cusr 0.00 csys = 12.92 CPU)
alter-table: Total time: 471 wallclock secs ( 0.16 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.27 CPU)
big-tables: Total time: 33 wallclock secs ( 9.47 usr 7.95 sys + 0.00 cusr 0.00 csys = 17.42 CPU)
connect: Total time: 90 wallclock secs (35.94 usr 18.84 sys + 0.00 cusr 0.00 csys = 54.78 CPU)
create: Total time: 154 wallclock secs ( 9.67 usr 5.83 sys + 0.00 cusr 0.00 csys = 15.50 CPU)
insert: Total time: 2773 wallclock secs (682.10 usr 184.31 sys + 0.00 cusr 0.00 csys = 866.41 CPU)
select: Total time: 1999 wallclock secs (72.84 usr 16.51 sys + 0.00 cusr 0.00 csys = 89.35 CPU)
wisconsin: Total time: 19 wallclock secs ( 3.95 usr 1.81 sys + 0.00 cusr 0.00 csys = 5.76 CPU)
All 8 tests executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 261.00 0.09 0.06 0.15 992
alter_table_drop 199.00 0.02 0.03 0.05 496
connect 17.00 7.03 3.40 10.43 10000
connect+select_1_row 16.00 7.76 4.01 11.77 10000
connect+select_simple 15.00 6.96 3.91 10.87 10000
count 45.00 0.02 0.01 0.03 100
count_distinct 65.00 0.34 0.05 0.39 1000
count_distinct_2 65.00 0.36 0.12 0.48 1000
count_distinct_big 162.00 7.60 3.30 10.90 120
count_distinct_group 190.00 1.21 0.35 1.56 1000
count_distinct_group_on_key 64.00 0.44 0.07 0.51 1000
count_distinct_group_on_key_parts 191.00 1.32 0.34 1.66 1000
count_distinct_key_prefix 56.00 0.40 0.05 0.45 1000
count_group_on_key_parts 60.00 1.17 0.36 1.53 1000
count_on_key 595.00 17.92 2.91 20.83 50100
create+drop 33.00 2.64 0.72 3.36 10000
create_MANY_tables 43.00 2.34 2.21 4.55 10000
create_index 5.00 0.00 0.00 0.00 8
create_key+drop 39.00 3.05 0.72 3.77 10000
create_table 1.00 0.00 0.00 0.00 31
delete_all 18.00 0.00 0.00 0.00 12
delete_all_many_keys 77.00 0.01 0.00 0.01 1
delete_big 0.00 0.00 0.00 0.00 1
delete_big_many_keys 77.00 0.01 0.00 0.01 128
delete_key 4.00 0.63 0.36 0.99 10000
drop_index 5.00 0.00 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 19.00 0.87 0.84 1.71 10000
insert 144.00 23.75 13.55 37.30 350768
insert_duplicates 31.00 5.12 4.16 9.28 100000
insert_key 138.00 10.42 6.05 16.47 100000
insert_many_fields 10.00 0.45 0.18 0.63 2000
insert_select_1_key 7.00 0.00 0.00 0.00 1
insert_select_2_keys 10.00 0.00 0.00 0.00 1
min_max 33.00 0.02 0.01 0.03 60
min_max_on_key 237.00 28.39 4.10 32.49 85000
multiple_value_insert 10.00 1.86 0.05 1.91 100000
order_by_big 77.00 21.83 9.99 31.82 10
order_by_big_key 35.00 23.27 10.06 33.33 10
order_by_big_key2 32.00 21.65 10.20 31.85 10
order_by_big_key_desc 37.00 23.23 10.33 33.56 10
order_by_big_key_diff 73.00 21.66 10.13 31.79 10
order_by_big_key_prefix 33.00 21.44 10.24 31.68 10
order_by_key2_diff 9.00 1.45 0.89 2.34 500
order_by_key_prefix 5.00 1.17 0.47 1.64 500
order_by_range 8.00 1.39 0.40 1.79 500
outer_join 112.00 0.00 0.00 0.00 10
outer_join_found 110.00 0.00 0.00 0.00 10
outer_join_not_found 60.00 0.00 0.01 0.01 500
outer_join_on_key 58.00 0.01 0.00 0.01 10
select_1_row 2.00 0.51 0.65 1.16 10000
select_2_rows 4.00 0.38 0.40 0.78 10000
select_big 64.00 34.10 15.48 49.58 10080
select_column+column 3.00 0.60 0.52 1.12 10000
select_diff_key 202.00 0.26 0.06 0.32 500
select_distinct 15.00 1.57 0.72 2.29 800
select_group 80.00 1.55 0.45 2.00 2911
select_group_when_MANY_tables 20.00 0.77 1.34 2.11 10000
select_join 4.00 0.50 0.23 0.73 100
select_key 131.00 69.07 11.59 80.66 200000
select_key2 140.00 87.29 11.66 98.95 200000
select_key2_return_key 133.00 65.03 10.13 75.16 200000
select_key2_return_prim 133.00 75.39 11.05 86.44 200000
select_key_prefix 141.00 80.77 13.11 93.88 200000
select_key_prefix_join 13.00 4.11 2.03 6.14 100
select_key_return_key 125.00 62.45 10.60 73.05 200000
select_many_fields 22.00 9.01 7.77 16.78 2000
select_query_cache 131.00 3.58 0.61 4.19 10000
select_query_cache2 131.00 3.78 0.68 4.46 10000
select_range 206.00 8.92 3.47 12.39 410
select_range_key2 21.00 5.77 1.51 7.28 25010
select_range_prefix 23.00 6.49 1.10 7.59 25010
select_simple 2.00 0.53 0.54 1.07 10000
select_simple_join 2.00 0.58 0.28 0.86 500
update_big 65.00 0.00 0.00 0.00 10
update_of_key 25.00 2.74 1.81 4.55 50000
update_of_key_big 36.00 0.01 0.07 0.08 501
update_of_primary_key_many_keys 43.00 0.05 0.02 0.07 256
update_with_key 119.00 19.18 12.34 31.52 300000
update_with_key_prefix 36.00 6.37 3.90 10.27 100000
wisc_benchmark 5.00 2.23 0.61 2.84 114
TOTALS 5668.00 822.89 239.37 1062.26 2667247
Benchmark DBD suite: 2.0
Date of test: 1998-08-21 13:27:33
Running tests on: Linux 2.0.35 i686
Arguments:
Comments: pentiumpro 400mz x2, 256M, SCSI, pgcc compiled, key_buffer=16M
Limits from:
Server version: MySQL 3.21.33
ATIS: Total time: 75 secs ( 5.52 usr 18.40 sys = 23.92 cpu)
alter-table: Total time: 1949 secs ( 0.26 usr 0.93 sys = 1.19 cpu)
big-tables: Total time: 281 secs ( 5.73 usr 22.43 sys = 28.16 cpu)
connect: Total time: 230 secs (31.85 usr 149.46 sys = 181.31 cpu)
create: Total time: 20 secs ( 0.81 usr 1.22 sys = 2.03 cpu)
insert: Total time: 2988 secs (172.92 usr 472.21 sys = 645.13 cpu)
select: Estimated total time: 2117 secs (34.14 usr 74.63 sys = 108.77 cpu)
wisconsin: Total time: 27 secs ( 2.40 usr 5.39 sys = 7.79 cpu)
All 8 tests executed successfully
Tests with estimated time have a + at end of line
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 1949 0.25 0.80 1.05 1992
connect 54 6.43 42.04 48.47 10000
connect+select 58 8.15 42.24 50.39 10000
count 58 0.00 0.09 0.09 100
count_on_key 780 13.21 22.42 35.63 50100 +
create+drop 3 0.20 0.31 0.51 1000
create_index 0 0.00 0.00 0.00 8
create_key+drop 3 0.21 0.45 0.66 1000
create_table 8 0.31 0.23 0.54 1031
delete_big 148 0.00 0.01 0.01 15
delete_key 0 0.05 0.06 0.11 500
drop_index 0 0.00 0.00 0.00 8
drop_table 1 0.02 0.07 0.09 1028
insert 206 15.52 39.66 55.18 350768
insert_duplicates 34 4.70 8.53 13.23 300000
insert_key 698 8.57 12.08 20.65 100000
insert_many_fields 129 0.22 0.65 0.87 2000
min_max 25 0.00 0.07 0.07 60
min_max_on_key 1474 14.64 38.47 53.11 73000 +
order_by 91 1.63 63.23 64.86 5
order_by_key 92 1.88 62.99 64.87 5
select 11 0.76 4.55 5.31 20000
select_big 103 12.16 68.00 80.16 10080
select_distinct 20 1.11 2.93 4.04 800
select_group 114 0.74 2.79 3.53 3811
select_join 31 2.65 11.18 13.83 200
select_key 247 65.75 99.71 165.46 200000
select_key_prefix 270 60.23 105.01 165.24 200000
select_many_fields 151 5.51 21.76 27.27 2000
select_range 357 9.98 24.74 34.72 25420
select_range_prefix 42 4.12 10.63 14.75 25010
select_simple 60 7.72 44.33 52.05 20000
select_simple_join 2 0.62 1.00 1.62 500
update_key 356 4.81 10.29 15.10 100256
update_key_big 85 0.41 0.31 0.72 105003
wisc_benchmark 7 1.04 2.86 3.90 114
TOTALS 7667 253.60 744.49 998.09 1615814 ++
Testing server 'MySQL 3.21.34' at 1999-02-28 15:43:25
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 1 wallclock secs ( 0.08 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for alter_table_add (992): 476 wallclock secs ( 0.35 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for create_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for drop_index (8): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for alter_table_drop (496): 338 wallclock secs ( 0.17 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Total time: 815 wallclock secs ( 0.61 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
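For orientation, the alter_table_add and alter_table_drop figures above come from repeatedly widening and then narrowing a single table; on the table handlers of this era each ALTER TABLE typically rebuilds the whole table, which is why these two steps dominate the total. The loop below is only a minimal sketch of that idea, not the actual sql-bench script; the DSN, table name and column names are made up.

#!/usr/bin/perl
# Hypothetical sketch of the ALTER TABLE test (illustrative names only).
use strict;
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "user", "password",
                       { RaiseError => 1 });

$dbh->do("CREATE TABLE bench_alter (i INT NOT NULL)");
$dbh->do("INSERT INTO bench_alter (i) VALUES ($_)") for 1 .. 1000;

# alter_table_add: widen the table one column per statement (the real test
# adds columns in steps, but each ALTER still pays for a table rebuild).
for my $n (1 .. 992) {
    $dbh->do("ALTER TABLE bench_alter ADD COLUMN extra_$n INT");
}

# alter_table_drop: shrink the table again.
for my $n (1 .. 496) {
    $dbh->do("ALTER TABLE bench_alter DROP COLUMN extra_$n");
}

$dbh->do("DROP TABLE bench_alter");
$dbh->disconnect;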
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 3:05:26
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.03 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_add (992): 198 wallclock secs ( 0.29 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 3 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_drop (496): 154 wallclock secs ( 0.14 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 359 wallclock secs ( 0.47 usr 0.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 6:39:53
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.09 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_add (992): 263 wallclock secs ( 0.10 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_drop (496): 197 wallclock secs ( 0.05 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 468 wallclock secs ( 0.25 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 8:34:56
Testing server 'MySQL 3.23.37' at 2001-04-13 1:04:11
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.12 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert (1000) 1 wallclock secs ( 0.05 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.07 CPU)
Time for alter_table_add (992): 280 wallclock secs ( 0.37 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_add (992): 261 wallclock secs ( 0.09 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.15 CPU)
Time for create_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 6 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_drop (496): 222 wallclock secs ( 0.16 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_drop (496): 199 wallclock secs ( 0.02 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Total time: 514 wallclock secs ( 0.65 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 471 wallclock secs ( 0.16 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.27 CPU)
Testing server 'MySQL 3.21.33' at 1998-08-21 11:29:38
Testing of ALTER TABLE
Testing with 2000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 secs ( 0.01 usr 0.13 sys = 0.14 cpu)
Time for alter_table_add (1992): 1949 secs ( 0.25 usr 0.80 sys = 1.05 cpu)
Time for create_index (8): 0 secs ( 0.00 usr 0.00 sys = 0.00 cpu)
Time for drop_index (8): 0 secs ( 0.00 usr 0.00 sys = 0.00 cpu)
Total time: 1949 secs ( 0.26 usr 0.93 sys = 1.19 cpu)
Testing server 'MySQL 3.21.34' at 1999-02-28 15:57:02
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 16 wallclock secs ( 6.68 usr 4.57 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 90 wallclock secs ( 3.72 usr 6.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing insert VALUES()
Time to insert_many_fields(1000): 4 wallclock secs ( 0.47 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 78 wallclock secs ( 0.10 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Total time: 188 wallclock secs (10.99 usr 10.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
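The two select_many_fields variants above differ only in how the single row is fetched: SELECT * versus a statement that spells out all 1000 column names, and likewise INSERT with and without the full column list; the much longer statements are what make the second variants slower. A rough, hedged sketch of the idea follows; the column names and DSN are illustrative, not what the benchmark actually generates.

use strict;
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "user", "password",
                       { RaiseError => 1 });

my @cols = map { "field$_" } 1 .. 1000;
$dbh->do("CREATE TABLE bench_wide ("
         . join(",", map { "$_ INT" } @cols) . ")");
$dbh->do("INSERT INTO bench_wide VALUES (" . join(",", ("0") x 1000) . ")");

# select * from table with 1 record
my $row_star  = $dbh->selectrow_arrayref("SELECT * FROM bench_wide");

# select all_fields from table with 1 record: same data, but the statement
# itself is thousands of characters long
my $row_named = $dbh->selectrow_arrayref(
    "SELECT " . join(",", @cols) . " FROM bench_wide");

# insert VALUES() versus insert (all_fields) VALUES()
$dbh->do("INSERT INTO bench_wide VALUES ("
         . join(",", ("1") x 1000) . ")");
$dbh->do("INSERT INTO bench_wide (" . join(",", @cols) . ") VALUES ("
         . join(",", ("2") x 1000) . ")");

$dbh->do("DROP TABLE bench_wide");
$dbh->disconnect;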
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 8:43:30
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 16 wallclock secs ( 8.58 usr 2.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 21 wallclock secs ( 8.83 usr 3.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 4 wallclock secs ( 0.77 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 9 wallclock secs ( 0.11 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 50 wallclock secs (18.30 usr 6.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 3:12:05
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 10 wallclock secs ( 4.33 usr 3.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 13 wallclock secs ( 4.10 usr 3.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 3 wallclock secs ( 0.40 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 7 wallclock secs ( 0.05 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 33 wallclock secs ( 8.89 usr 7.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 6:47:42
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 9 wallclock secs ( 4.02 usr 3.44 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 13 wallclock secs ( 4.20 usr 3.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 3 wallclock secs ( 0.37 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 6 wallclock secs ( 0.07 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 31 wallclock secs ( 8.67 usr 6.87 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.21.33' at 1998-08-21 12:02:09
Testing server 'MySQL 3.23.37' at 2001-04-13 1:12:02
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 16 secs ( 3.22 usr 10.31 sys = 13.53 cpu)
Time to select_many_fields(1000): 9 wallclock secs ( 4.67 usr 4.30 sys + 0.00 cusr 0.00 csys = 8.97 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 135 secs ( 2.29 usr 11.45 sys = 13.74 cpu)
Time to select_many_fields(1000): 13 wallclock secs ( 4.34 usr 3.47 sys + 0.00 cusr 0.00 csys = 7.81 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 4 secs ( 0.13 usr 0.51 sys = 0.64 cpu)
Time to insert_many_fields(1000): 3 wallclock secs ( 0.42 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.49 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 125 secs ( 0.09 usr 0.14 sys = 0.23 cpu)
Time to insert_many_fields(1000): 7 wallclock secs ( 0.03 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.14 CPU)
Total time: 281 secs ( 5.73 usr 22.43 sys = 28.16 cpu)
Total time: 33 wallclock secs ( 9.47 usr 7.95 sys + 0.00 cusr 0.00 csys = 17.42 CPU)
Testing server 'MySQL 3.21.34' at 1999-02-28 16:00:11
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 30 wallclock secs (14.51 usr 10.85 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Test connect/simple select/disconnect
Time for select_simple (10000): 32 wallclock secs (15.36 usr 12.85 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.59 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing connect/select 1 row from table/disconnect
Time to connect+select (10000): 34 wallclock secs (18.27 usr 7.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing select 1 row from table
Time to select (10000): 3 wallclock secs ( 0.91 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing select 2 rows from table
Time to select (10000): 4 wallclock secs ( 0.80 usr 0.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 43 wallclock secs (16.00 usr 9.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Total time: 148 wallclock secs (66.47 usr 42.30 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
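Almost all of the time in this test is connection setup and teardown rather than query execution, so the loops being timed amount to the sketch below. This is a simplified illustration, not the benchmark's own code; the DSN and credentials are placeholders.

use strict;
use DBI;

my $dsn = "DBI:mysql:database=test;host=localhost";   # placeholder DSN

# connect: bare connect/disconnect round trips
for (1 .. 10_000) {
    my $dbh = DBI->connect($dsn, "user", "password", { RaiseError => 1 });
    $dbh->disconnect;
}

# connect+select_simple: the same loop plus one trivial statement per connection
for (1 .. 10_000) {
    my $dbh = DBI->connect($dsn, "user", "password", { RaiseError => 1 });
    my ($one) = $dbh->selectrow_array("SELECT 1");
    $dbh->disconnect;
}

# select_simple: the same trivial statement over one long-lived connection,
# which is why it is so much cheaper in the figures above
my $dbh = DBI->connect($dsn, "user", "password", { RaiseError => 1 });
$dbh->selectrow_array("SELECT 1") for 1 .. 10_000;
$dbh->disconnect;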
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 8:44:21
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 18 wallclock secs (13.36 usr 1.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 21 wallclock secs (14.34 usr 1.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.74 usr 0.31 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 23 wallclock secs (14.47 usr 1.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 3 wallclock secs ( 1.05 usr 0.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 4 wallclock secs ( 1.26 usr 0.35 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 49 wallclock secs (20.80 usr 4.97 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 120 wallclock secs (66.02 usr 10.12 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 3:12:38
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 12 wallclock secs ( 6.38 usr 2.82 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 13 wallclock secs ( 6.71 usr 3.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.34 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 15 wallclock secs ( 6.57 usr 3.78 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 2 wallclock secs ( 0.54 usr 0.38 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.57 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 27 wallclock secs ( 8.98 usr 5.65 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 74 wallclock secs (30.09 usr 16.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 6:48:14
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 12 wallclock secs ( 7.18 usr 2.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 14 wallclock secs ( 7.39 usr 3.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.47 usr 0.71 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 15 wallclock secs ( 7.75 usr 3.29 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 2 wallclock secs ( 0.45 usr 0.57 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.59 usr 0.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test select with arithmetic (+)
Time for select_column+column (10000): 3 wallclock secs ( 0.58 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 28 wallclock secs ( 9.97 usr 5.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 79 wallclock secs (34.39 usr 17.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.37' at 2001-04-13 1:12:35
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 17 wallclock secs ( 7.03 usr 3.40 sys + 0.00 cusr 0.00 csys = 10.43 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 15 wallclock secs ( 6.96 usr 3.91 sys + 0.00 cusr 0.00 csys = 10.87 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.53 usr 0.54 sys + 0.00 cusr 0.00 csys = 1.07 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 16 wallclock secs ( 7.76 usr 4.01 sys + 0.00 cusr 0.00 csys = 11.77 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 2 wallclock secs ( 0.51 usr 0.65 sys + 0.00 cusr 0.00 csys = 1.16 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 4 wallclock secs ( 0.38 usr 0.40 sys + 0.00 cusr 0.00 csys = 0.78 CPU)
Test select with arithmetic (+)
Time for select_column+column (10000): 3 wallclock secs ( 0.60 usr 0.52 sys + 0.00 cusr 0.00 csys = 1.12 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 31 wallclock secs (12.16 usr 5.41 sys + 0.00 cusr 0.00 csys = 17.57 CPU)
Total time: 90 wallclock secs (35.94 usr 18.84 sys + 0.00 cusr 0.00 csys = 54.78 CPU)
Testing server 'MySQL 3.21.33' at 1998-08-21 12:06:50
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 54 secs ( 6.43 usr 42.04 sys = 48.47 cpu)
Test connect/simple select/disconnect
Time for select_simple (10000): 56 secs ( 7.46 usr 42.52 sys = 49.98 cpu)
Test simple select
Time for select_simple (10000): 4 secs ( 0.26 usr 1.81 sys = 2.07 cpu)
Testing connect/select 1 row from table/disconnect
Time to connect+select (10000): 58 secs ( 8.15 usr 42.24 sys = 50.39 cpu)
Testing select 1 row from table
Time to select (10000): 5 secs ( 0.42 usr 1.87 sys = 2.29 cpu)
Testing select 2 rows from table
Time to select (10000): 6 secs ( 0.34 usr 2.68 sys = 3.02 cpu)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 47 secs ( 8.79 usr 16.29 sys = 25.08 cpu)
Total time: 230 secs (31.85 usr 149.46 sys = 181.31 cpu)
Testing server 'MySQL 3.21.34' at 1999-02-28 16:02:40
Testing the speed of creating and dropping tables
All tests are done 1000 times
Testing create of tables
Time for create_table (1000): 5 wallclock secs ( 0.27 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Accessing tables
Time to select_group (1000): 1 wallclock secs ( 0.19 usr 0.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing drop
Time for drop_table (1000): 1 wallclock secs ( 0.08 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing create+drop
Time for create+drop (1000): 1 wallclock secs ( 0.37 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for create_key+drop (1000): 3 wallclock secs ( 0.77 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Total time: 11 wallclock secs ( 1.68 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
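The create/drop figures are mostly filesystem work, since each table is a small set of files under the data directory; the timed loops are essentially the sketch below (table names are illustrative, and the real test also runs a select against each table in between).

use strict;
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "user", "password",
                       { RaiseError => 1 });

# create_table / drop_table: 1000 distinct tables, created and later dropped
for my $n (1 .. 1000) {
    $dbh->do("CREATE TABLE bench_create_$n (i INT NOT NULL, d DOUBLE)");
}
for my $n (1 .. 1000) {
    $dbh->do("DROP TABLE bench_create_$n");
}

# create+drop: the same pair back to back on one table name
for (1 .. 1000) {
    $dbh->do("CREATE TABLE bench_tmp (i INT NOT NULL)");
    $dbh->do("DROP TABLE bench_tmp");
}

# create_key+drop: as above, but with a key, which adds the index-file work
for (1 .. 1000) {
    $dbh->do("CREATE TABLE bench_tmp (i INT NOT NULL, PRIMARY KEY (i))");
    $dbh->do("DROP TABLE bench_tmp");
}

$dbh->disconnect;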
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 8:46:21
Testing the speed of creating and dropping tables
All tests are done 1000 times
Okay... Let's make sure that our tables don't exist yet.
Testing create of tables
Time for create_table (1000): 4 wallclock secs ( 0.46 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
Time to select_group (1000): 1 wallclock secs ( 0.19 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
Time for drop_table (1000): 1 wallclock secs ( 0.15 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
Time for create+drop (1000): 1 wallclock secs ( 0.63 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_key+drop (1000): 2 wallclock secs ( 0.78 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 9 wallclock secs ( 2.21 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 3:13:52
Testing the speed of creating and dropping tables
Testing with 10000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (10000): 263 wallclock secs ( 2.58 usr 0.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
Time to select_group_when_MANY_tables (10000): 97 wallclock secs ( 1.09 usr 0.52 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
Time for drop_table_when_MANY_tables (10000): 22 wallclock secs ( 0.59 usr 0.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
Time for create+drop (10000): 19 wallclock secs ( 2.43 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_key+drop (10000): 18 wallclock secs ( 3.88 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 419 wallclock secs (10.58 usr 3.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 15:28:12
Testing the speed of creating and dropping tables
Testing with 10000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (10000): 29 wallclock secs ( 2.27 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
Time to select_group_when_MANY_tables (10000): 16 wallclock secs ( 0.79 usr 0.62 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
Time for drop_table_when_MANY_tables (10000): 12 wallclock secs ( 0.63 usr 0.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
Time for create+drop (10000): 29 wallclock secs ( 2.43 usr 0.82 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_key+drop (10000): 40 wallclock secs ( 3.97 usr 0.87 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 126 wallclock secs (10.09 usr 3.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.37' at 2001-04-13 1:14:05
Testing the speed of creating and dropping tables
Testing with 10000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (10000): 43 wallclock secs ( 2.34 usr 2.21 sys + 0.00 cusr 0.00 csys = 4.55 CPU)
Accessing tables
Time to select_group_when_MANY_tables (10000): 20 wallclock secs ( 0.77 usr 1.34 sys + 0.00 cusr 0.00 csys = 2.11 CPU)
Testing drop
Time for drop_table_when_MANY_tables (10000): 19 wallclock secs ( 0.87 usr 0.84 sys + 0.00 cusr 0.00 csys = 1.71 CPU)
Testing create+drop
Time for create+drop (10000): 33 wallclock secs ( 2.64 usr 0.72 sys + 0.00 cusr 0.00 csys = 3.36 CPU)
Time for create_key+drop (10000): 39 wallclock secs ( 3.05 usr 0.72 sys + 0.00 cusr 0.00 csys = 3.77 CPU)
Total time: 154 wallclock secs ( 9.67 usr 5.83 sys + 0.00 cusr 0.00 csys = 15.50 CPU)
Testing server 'MySQL 3.21.33' at 1998-08-21 12:10:41
Testing the speed of creating and dropping tables
All tests are done 1000 times
Testing create of tables
Time for create_table (1000): 8 secs ( 0.30 usr 0.22 sys = 0.52 cpu)
Accessing tables
Time to select_group (1000): 5 secs ( 0.08 usr 0.18 sys = 0.26 cpu)
Testing drop
Time for drop_table (1000): 1 secs ( 0.02 usr 0.06 sys = 0.08 cpu)
Testing create+drop
Time for create+drop (1000): 3 secs ( 0.20 usr 0.31 sys = 0.51 cpu)
Time for create_key+drop (1000): 3 secs ( 0.21 usr 0.45 sys = 0.66 cpu)
Total time: 20 secs ( 0.81 usr 1.22 sys = 2.03 cpu)
Testing server 'MySQL 3.21.34' at 1999-02-28 16:02:51
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 128 wallclock secs (30.13 usr 10.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing insert of duplicates
Time for insert_duplicates (300000): 26 wallclock secs ( 6.84 usr 3.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Retrieving data from the table
Time for select_big (10:3000000): 53 wallclock secs (28.53 usr 15.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for order_by_key (10:3000000): 71 wallclock secs (28.30 usr 20.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for order_by (10:3000000): 71 wallclock secs (28.62 usr 19.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_range_prefix (5010:42084): 24 wallclock secs ( 3.65 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_range (5010:42084): 15 wallclock secs ( 3.70 usr 0.90 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_key_prefix (200000): 247 wallclock secs (94.75 usr 12.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_key (200000): 213 wallclock secs (109.13 usr 13.71 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 14 wallclock secs ( 6.53 usr 0.91 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_range (20000:43500): 14 wallclock secs ( 5.80 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_group (111): 80 wallclock secs ( 0.12 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for min_max_on_key (3000): 395 wallclock secs ( 1.64 usr 0.18 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for min_max (60): 23 wallclock secs ( 0.04 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for count_on_key (100): 37 wallclock secs ( 0.06 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for count (100): 61 wallclock secs ( 0.09 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing update with functions
Time for update_key (500): 0 wallclock secs ( 0.05 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for update_key_big (501): 52 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing delete
Time for delete_key (500): 12 wallclock secs ( 0.02 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for delete_big (12): 72 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 157 wallclock secs (14.40 usr 3.85 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 72 wallclock secs ( 0.03 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Deleting everything from table
Time for delete_big (2): 133 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Total time: 1971 wallclock secs (362.50 usr 103.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 8:46:31
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 148 wallclock secs (45.77 usr 8.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert of duplicates
Time for insert_duplicates (300000): 16 wallclock secs ( 5.74 usr 1.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 68 wallclock secs (46.81 usr 8.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_key (10:3000000): 69 wallclock secs (46.84 usr 7.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by (10:3000000): 90 wallclock secs (46.67 usr 8.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_diff_key (500:1000): 305 wallclock secs ( 0.55 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_prefix (5010:42084): 20 wallclock secs ( 5.91 usr 0.57 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (5010:42084): 20 wallclock secs ( 6.05 usr 0.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key_prefix (200000): 406 wallclock secs (175.76 usr 12.40 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key (200000): 401 wallclock secs (164.79 usr 11.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 22 wallclock secs ( 9.32 usr 0.73 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (20000:43500): 22 wallclock secs ( 9.27 usr 0.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (111): 58 wallclock secs ( 0.11 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (3000): 4 wallclock secs ( 2.23 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max (60): 37 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (100): 52 wallclock secs ( 0.16 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count (100): 73 wallclock secs ( 0.08 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
Time for update_of_key (500): 1 wallclock secs ( 0.08 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for update_of_key_big (501): 29 wallclock secs ( 0.08 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update with key
Time for update_with_key (100000): 381 wallclock secs (57.20 usr 11.84 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of all rows
Time for update_big (500): 34 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 65 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 84 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 82 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Note: Query took longer than time-limit: 600
Estimating end time based on:
102 queries in 101 loops of 500 loops took 605 seconds
Estimated time for outer_join_not_found (500:504): 605 wallclock secs ( 0.13 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (500): 1 wallclock secs ( 0.12 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for delete_big (12): 20 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 209 wallclock secs (21.42 usr 3.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys
Time for update_of_key (256): 60 wallclock secs ( 0.03 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting everything from table
Time for delete_big_many_keys (2): 257 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting 100000 rows with multiple values
Time for multiple_value_insert (100000): 9 wallclock secs ( 2.46 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Estimated total time: 6039 wallclock secs (648.24 usr 76.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 3:20:52
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 93 wallclock secs (17.97 usr 8.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert of duplicates
Time for insert_duplicates (300000): 14 wallclock secs ( 3.03 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 31 wallclock secs (20.53 usr 10.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_key (10:3000000): 30 wallclock secs (20.22 usr 10.11 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by (10:3000000): 47 wallclock secs (20.41 usr 15.62 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_diff_key (500:1000): 174 wallclock secs ( 0.28 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_prefix (5010:42084): 10 wallclock secs ( 2.73 usr 0.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (5010:42084): 10 wallclock secs ( 2.73 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key_prefix (200000): 130 wallclock secs (66.91 usr 10.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key (200000): 128 wallclock secs (66.75 usr 11.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 8 wallclock secs ( 3.10 usr 0.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (20000:43500): 8 wallclock secs ( 3.45 usr 0.82 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (111): 52 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (15000): 9 wallclock secs ( 4.75 usr 0.64 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max (60): 29 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (100): 51 wallclock secs ( 0.06 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count (100): 42 wallclock secs ( 0.10 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (20): 62 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
Time for update_of_key (500): 22 wallclock secs ( 2.61 usr 1.35 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for update_of_key_big (501): 25 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update with key
Time for update_with_key (100000): 97 wallclock secs (15.38 usr 7.87 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of all rows
Time for update_big (500): 28 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 51 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 63 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_not_found (500:10): 46 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 3 wallclock secs ( 0.73 usr 0.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for delete_big (12): 15 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 111 wallclock secs (10.00 usr 3.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys
Time for update_of_key (256): 37 wallclock secs ( 0.04 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting everything from table
Time for delete_big_many_keys (2): 59 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting 100000 rows with multiple values
Time for multiple_value_insert (100000): 7 wallclock secs ( 1.92 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1552 wallclock secs (263.84 usr 86.41 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 6:49:34
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 111 wallclock secs (20.07 usr 12.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert of duplicates
Time for insert_duplicates (100000): 24 wallclock secs ( 3.18 usr 3.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 33 wallclock secs (22.37 usr 10.19 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_big_key (10:3000000): 34 wallclock secs (23.91 usr 10.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_big_key_desc (10:3000000): 36 wallclock secs (23.89 usr 10.41 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_big_key_prefix (10:3000000): 32 wallclock secs (22.21 usr 10.50 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_big_key2 (10:3000000): 33 wallclock secs (22.58 usr 10.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_big_key_diff (10:3000000): 50 wallclock secs (22.07 usr 16.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_big (10:3000000): 52 wallclock secs (22.29 usr 15.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_range (500:125750): 5 wallclock secs ( 1.14 usr 0.41 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_key_prefix (500:125750): 3 wallclock secs ( 1.10 usr 0.38 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_key2_diff (500:250500): 5 wallclock secs ( 1.12 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_diff_key (500:1000): 191 wallclock secs ( 0.33 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_prefix (5010:42084): 11 wallclock secs ( 3.03 usr 0.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_key2 (5010:42084): 10 wallclock secs ( 2.59 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key_prefix (200000): 134 wallclock secs (71.19 usr 14.71 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key (200000): 126 wallclock secs (68.35 usr 14.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key2 (200000): 132 wallclock secs (69.41 usr 13.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 9 wallclock secs ( 3.79 usr 0.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_key2 (20000:43500): 8 wallclock secs ( 3.77 usr 0.96 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (111): 54 wallclock secs ( 0.05 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (15000): 8 wallclock secs ( 4.55 usr 0.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max (60): 32 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (100): 51 wallclock secs ( 0.09 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count (100): 47 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (20): 64 wallclock secs ( 0.02 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
Time for update_of_key (50000): 23 wallclock secs ( 2.80 usr 2.29 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for update_of_key_big (501): 33 wallclock secs ( 0.11 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update with key
Time for update_with_key (300000): 113 wallclock secs (17.01 usr 12.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for update_with_key_prefix (100000): 32 wallclock secs ( 5.68 usr 4.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of all rows
Time for update_big (10): 64 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 57 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 72 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 67 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_not_found (500:10): 50 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 7 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_select_2_keys (1): 9 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.56 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for delete_all (12): 20 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 154 wallclock secs (10.52 usr 4.42 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 65 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting rows from the table
Time for delete_big_many_keys (128): 151 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting everything from table
Time for delete_all_many_keys (1): 151 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting 100000 rows with multiple values
Time for multiple_value_insert (100000): 9 wallclock secs ( 1.88 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 2227 wallclock secs (451.81 usr 170.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.37' at 2001-04-13 1:16:40
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 122 wallclock secs (20.64 usr 11.54 sys + 0.00 cusr 0.00 csys = 32.18 CPU)
Testing insert of duplicates
Time for insert_duplicates (100000): 31 wallclock secs ( 5.12 usr 4.16 sys + 0.00 cusr 0.00 csys = 9.28 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 32 wallclock secs (21.80 usr 10.01 sys + 0.00 cusr 0.00 csys = 31.81 CPU)
Time for order_by_big_key (10:3000000): 35 wallclock secs (23.27 usr 10.06 sys + 0.00 cusr 0.00 csys = 33.33 CPU)
Time for order_by_big_key_desc (10:3000000): 37 wallclock secs (23.23 usr 10.33 sys + 0.00 cusr 0.00 csys = 33.56 CPU)
Time for order_by_big_key_prefix (10:3000000): 33 wallclock secs (21.44 usr 10.24 sys + 0.00 cusr 0.00 csys = 31.68 CPU)
Time for order_by_big_key2 (10:3000000): 32 wallclock secs (21.65 usr 10.20 sys + 0.00 cusr 0.00 csys = 31.85 CPU)
Time for order_by_big_key_diff (10:3000000): 73 wallclock secs (21.66 usr 10.13 sys + 0.00 cusr 0.00 csys = 31.79 CPU)
Time for order_by_big (10:3000000): 77 wallclock secs (21.83 usr 9.99 sys + 0.00 cusr 0.00 csys = 31.82 CPU)
Time for order_by_range (500:125750): 8 wallclock secs ( 1.39 usr 0.40 sys + 0.00 cusr 0.00 csys = 1.79 CPU)
Time for order_by_key_prefix (500:125750): 5 wallclock secs ( 1.17 usr 0.47 sys + 0.00 cusr 0.00 csys = 1.64 CPU)
Time for order_by_key2_diff (500:250500): 9 wallclock secs ( 1.45 usr 0.89 sys + 0.00 cusr 0.00 csys = 2.34 CPU)
Time for select_diff_key (500:1000): 202 wallclock secs ( 0.26 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.32 CPU)
Time for select_range_prefix (5010:42084): 13 wallclock secs ( 2.65 usr 0.50 sys + 0.00 cusr 0.00 csys = 3.15 CPU)
Time for select_range_key2 (5010:42084): 12 wallclock secs ( 2.37 usr 0.75 sys + 0.00 cusr 0.00 csys = 3.12 CPU)
Time for select_key_prefix (200000): 141 wallclock secs (80.77 usr 13.11 sys + 0.00 cusr 0.00 csys = 93.88 CPU)
Time for select_key (200000): 131 wallclock secs (69.07 usr 11.59 sys + 0.00 cusr 0.00 csys = 80.66 CPU)
Time for select_key_return_key (200000): 125 wallclock secs (62.45 usr 10.60 sys + 0.00 cusr 0.00 csys = 73.05 CPU)
Time for select_key2 (200000): 140 wallclock secs (87.29 usr 11.66 sys + 0.00 cusr 0.00 csys = 98.95 CPU)
Time for select_key2_return_key (200000): 133 wallclock secs (65.03 usr 10.13 sys + 0.00 cusr 0.00 csys = 75.16 CPU)
Time for select_key2_return_prim (200000): 133 wallclock secs (75.39 usr 11.05 sys + 0.00 cusr 0.00 csys = 86.44 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 10 wallclock secs ( 3.84 usr 0.60 sys + 0.00 cusr 0.00 csys = 4.44 CPU)
Time for select_range_key2 (20000:43500): 9 wallclock secs ( 3.40 usr 0.76 sys + 0.00 cusr 0.00 csys = 4.16 CPU)
Time for select_group (111): 60 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for min_max_on_key (15000): 9 wallclock secs ( 3.51 usr 0.42 sys + 0.00 cusr 0.00 csys = 3.93 CPU)
Time for min_max (60): 33 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Time for count_on_key (100): 54 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Time for count (100): 45 wallclock secs ( 0.02 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Time for count_distinct_big (20): 97 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
Time for update_of_key (50000): 25 wallclock secs ( 2.74 usr 1.81 sys + 0.00 cusr 0.00 csys = 4.55 CPU)
Time for update_of_key_big (501): 36 wallclock secs ( 0.01 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.08 CPU)
Testing update with key
Time for update_with_key (300000): 119 wallclock secs (19.18 usr 12.34 sys + 0.00 cusr 0.00 csys = 31.52 CPU)
Time for update_with_key_prefix (100000): 36 wallclock secs ( 6.37 usr 3.90 sys + 0.00 cusr 0.00 csys = 10.27 CPU)
Testing update of all rows
Time for update_big (10): 65 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 58 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Time for outer_join (10:10): 112 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 110 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_not_found (500:10): 60 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 7 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_select_2_keys (1): 10 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(2): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.63 usr 0.36 sys + 0.00 cusr 0.00 csys = 0.99 CPU)
Time for delete_all (12): 18 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 138 wallclock secs (10.42 usr 6.05 sys + 0.00 cusr 0.00 csys = 16.47 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 43 wallclock secs ( 0.05 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.07 CPU)
Deleting rows from the table
Time for delete_big_many_keys (128): 77 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Deleting everything from table
Time for delete_all_many_keys (1): 77 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Inserting 100000 rows with multiple values
Time for multiple_value_insert (100000): 10 wallclock secs ( 1.86 usr 0.05 sys + 0.00 cusr 0.00 csys = 1.91 CPU)
Time for drop table(1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 2773 wallclock secs (682.10 usr 184.31 sys + 0.00 cusr 0.00 csys = 866.41 CPU)
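One figure worth calling out in the run above is multiple_value_insert: per row it is several times faster than the single-row insert step, because the client/server round trip and statement parse are paid once per statement instead of once per row. A hedged sketch of the two styles (table name and chunk size are made up):

use strict;
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "user", "password",
                       { RaiseError => 1 });

$dbh->do("CREATE TABLE bench_ins (id INT NOT NULL, info CHAR(32),"
         . " PRIMARY KEY (id))");

# insert: one statement (and one round trip) per row
my $sth = $dbh->prepare("INSERT INTO bench_ins (id, info) VALUES (?, 'x')");
$sth->execute($_) for 1 .. 100_000;

# multiple_value_insert: MySQL's multi-row VALUES list, many rows per statement
my @pending = map { "($_, 'x')" } 100_001 .. 200_000;
while (my @chunk = splice(@pending, 0, 1000)) {
    $dbh->do("INSERT INTO bench_ins (id, info) VALUES " . join(",", @chunk));
}

$dbh->do("DROP TABLE bench_ins");
$dbh->disconnect;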
Testing server 'MySQL 3.21.33' at 1998-08-21 13:27:34
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 176 secs (13.67 usr 34.60 sys = 48.27 cpu)
Testing insert of duplicates
Time for insert_duplicates (300000): 34 secs ( 4.70 usr 8.53 sys = 13.23 cpu)
Retrieving data from the table
Time for select_big (10:3000000): 55 secs ( 3.26 usr 51.46 sys = 54.72 cpu)
Time for order_by_key (5:3000000): 92 secs ( 1.88 usr 62.99 sys = 64.87 cpu)
Time for order_by (5:3000000): 91 secs ( 1.63 usr 63.23 sys = 64.86 cpu)
Time for select_range_prefix (5010:42084): 26 secs ( 1.15 usr 4.63 sys = 5.78 cpu)
Time for select_range (5010:42084): 18 secs ( 0.73 usr 5.18 sys = 5.91 cpu)
Time for select_key_prefix (200000): 270 secs (60.23 usr 105.01 sys = 165.24 cpu)
Time for select_key (200000): 247 secs (65.75 usr 99.71 sys = 165.46 cpu)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 16 secs ( 2.97 usr 6.00 sys = 8.97 cpu)
Time for select_range (20000:43500): 15 secs ( 3.01 usr 5.54 sys = 8.55 cpu)
Time for select_group (111): 92 secs ( 0.01 usr 0.10 sys = 0.11 cpu)
Time for min_max_on_key (3000): 427 secs ( 0.07 usr 2.19 sys = 2.26 cpu)
Time for min_max (60): 25 secs ( 0.00 usr 0.07 sys = 0.07 cpu)
Time for count_on_key (100): 41 secs ( 0.01 usr 0.11 sys = 0.12 cpu)
Time for count (100): 58 secs ( 0.00 usr 0.09 sys = 0.09 cpu)
Testing update with functions
Time for update_key (500): 259 secs ( 4.80 usr 10.25 sys = 15.05 cpu)
Time for update_key_big (105003): 85 secs ( 0.41 usr 0.31 sys = 0.72 cpu)
Testing delete
Time for delete_key (500): 0 secs ( 0.05 usr 0.06 sys = 0.11 cpu)
Time for delete_big (12): 27 secs ( 0.00 usr 0.01 sys = 0.01 cpu)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 698 secs ( 8.57 usr 12.08 sys = 20.65 cpu)
Testing update of keys
Time for update_key (256): 97 secs ( 0.01 usr 0.04 sys = 0.05 cpu)
Deleting everything from table
Time for delete_big (2): 120 secs ( 0.00 usr 0.00 sys = 0.00 cpu)
Total time: 2988 secs (172.92 usr 472.21 sys = 645.13 cpu)
Testing server 'MySQL 3.21.34' at 1999-02-28 16:35:44
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 5 wallclock secs ( 1.32 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.23 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time for select_range (410:1057904): 320 wallclock secs (12.23 usr 6.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Note: Query took longer than time-limit: 600
Estimating end time based on:
48622 queries in 6946 loops of 10000 loops took 601 seconds
Estimated time for min_max_on_key (70000): 865 wallclock secs (38.44 usr 4.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Note: Query took longer than time-limit: 600
Estimating end time based on:
49800 queries in 9960 loops of 10000 loops took 601 seconds
Estimated time for count_on_key (50000): 603 wallclock secs (27.12 usr 3.46 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Estimated total time: 1794 wallclock secs (79.34 usr 14.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
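When a single test exceeds the 600-second time limit, the suite stops it and reports an extrapolated figure instead of a measured one. The printed numbers are consistent with a simple linear scale-up of the completed loops: for min_max_on_key above, 601 s * 10000 / 6946 completed loops is roughly 865 s, and for count_on_key, 601 s * 10000 / 9960 is roughly 603 s. These estimated rows are what the '+' marks in the summary tables refer to.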
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 9:47:21
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 6 wallclock secs ( 2.10 usr 0.28 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.32 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (410:1057904): 345 wallclock secs (18.21 usr 2.75 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (70000): 261 wallclock secs (51.47 usr 3.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (50000): 596 wallclock secs (38.79 usr 2.36 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1209 wallclock secs (110.89 usr 8.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 3:46:46
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 3 wallclock secs ( 0.64 usr 0.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.12 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (410:1057904): 190 wallclock secs ( 8.49 usr 3.90 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (70000): 210 wallclock secs (21.63 usr 3.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (50000): 506 wallclock secs (17.66 usr 2.67 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_group_on_key_parts (0:0): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing count(distinct) on the table
Time for count_distinct (1000:2000): 110 wallclock secs ( 0.78 usr 0.10 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key (1000:6000): 58 wallclock secs ( 0.41 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 74 wallclock secs ( 1.18 usr 0.60 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group (1000:100000): 75 wallclock secs ( 1.20 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (1000:10000000): 570 wallclock secs (73.95 usr 55.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1797 wallclock secs (126.08 usr 67.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 7:26:42
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 5 wallclock secs ( 1.03 usr 0.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.14 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (410:1057904): 194 wallclock secs ( 9.08 usr 3.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (70000): 230 wallclock secs (22.79 usr 4.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (50000): 508 wallclock secs (16.98 usr 2.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_group_on_key_parts (1000:100000): 54 wallclock secs ( 1.00 usr 0.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing count(distinct) on the table
Time for count_distinct (2000:2000): 119 wallclock secs ( 0.66 usr 0.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key (1000:6000): 60 wallclock secs ( 0.40 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 81 wallclock secs ( 0.79 usr 0.41 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group (1000:100000): 80 wallclock secs ( 1.02 usr 0.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (100:1000000): 47 wallclock secs ( 7.46 usr 5.86 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1379 wallclock secs (61.35 usr 18.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.37' at 2001-04-13 2:02:54
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 5 wallclock secs ( 0.80 usr 0.46 sys + 0.00 cusr 0.00 csys = 1.26 CPU)
Test if the database has a query cache
Time for select_query_cache (10000): 131 wallclock secs ( 3.58 usr 0.61 sys + 0.00 cusr 0.00 csys = 4.19 CPU)
Time for select_query_cache2 (10000): 131 wallclock secs ( 3.78 usr 0.68 sys + 0.00 cusr 0.00 csys = 4.46 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.14 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
Time for select_range (410:1057904): 206 wallclock secs ( 8.92 usr 3.47 sys + 0.00 cusr 0.00 csys = 12.39 CPU)
Time for min_max_on_key (70000): 228 wallclock secs (24.88 usr 3.68 sys + 0.00 cusr 0.00 csys = 28.56 CPU)
Time for count_on_key (50000): 541 wallclock secs (17.89 usr 2.91 sys + 0.00 cusr 0.00 csys = 20.80 CPU)
Time for count_group_on_key_parts (1000:100000): 60 wallclock secs ( 1.17 usr 0.36 sys + 0.00 cusr 0.00 csys = 1.53 CPU)
Testing count(distinct) on the table
Time for count_distinct_key_prefix (1000:1000): 56 wallclock secs ( 0.40 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.45 CPU)
Time for count_distinct (1000:1000): 65 wallclock secs ( 0.34 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.39 CPU)
Time for count_distinct_2 (1000:1000): 65 wallclock secs ( 0.36 usr 0.12 sys + 0.00 cusr 0.00 csys = 0.48 CPU)
Time for count_distinct_group_on_key (1000:6000): 64 wallclock secs ( 0.44 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.51 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 191 wallclock secs ( 1.32 usr 0.34 sys + 0.00 cusr 0.00 csys = 1.66 CPU)
Time for count_distinct_group (1000:100000): 190 wallclock secs ( 1.21 usr 0.35 sys + 0.00 cusr 0.00 csys = 1.56 CPU)
Time for count_distinct_big (100:1000000): 65 wallclock secs ( 7.60 usr 3.30 sys + 0.00 cusr 0.00 csys = 10.90 CPU)
Total time: 1999 wallclock secs (72.84 usr 16.51 sys + 0.00 cusr 0.00 csys = 89.35 CPU)
Testing server 'MySQL 3.21.33' at 1998-08-21 14:17:23
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 6 secs ( 0.01 usr 1.78 sys = 1.79 cpu)
Testing big selects on the table
Time for select_big (70:17207): 1 secs ( 0.11 usr 0.25 sys = 0.36 cpu)
Time for select_range (410:1057904): 324 secs ( 6.24 usr 14.02 sys = 20.26 cpu)
Note: Query took longer than time-limit: 600
Estimating end time based on:
40173 queries in 5739 loops of 10000 loops took 601 seconds
Estimated time for min_max_on_key (70000): 1047 secs (14.57 usr 36.28 sys = 50.85 cpu)
Note: Query took longer than time-limit: 600
Estimating end time based on:
40640 queries in 8128 loops of 10000 loops took 601 seconds
Estimated time for count_on_key (50000): 739 secs (13.20 usr 22.31 sys = 35.51 cpu)
Estimated total time: 2117 secs (34.14 usr 74.63 sys = 108.77 cpu)
Testing server 'MySQL 3.21.34' at 1999-02-28 17:01:12
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Inserting data
Time to insert (31000): 13 wallclock secs ( 2.22 usr 1.05 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Running actual benchmark
Time for wisc_benchmark (114): 6 wallclock secs ( 2.48 usr 1.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Total time: 20 wallclock secs ( 4.70 usr 2.13 sys + 0.00 cusr 0.00 csys = 0.00 CPU secs)
Testing server 'MySQL 3.23.3 alpha' at 1999-09-17 10:07:31
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 15 wallclock secs ( 4.40 usr 0.87 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 7 wallclock secs ( 3.42 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 23 wallclock secs ( 7.83 usr 1.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.17 alpha' at 2000-06-01 4:16:45
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 11 wallclock secs ( 1.47 usr 0.89 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to delete_big (1): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 4 wallclock secs ( 1.72 usr 0.77 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 15 wallclock secs ( 3.20 usr 1.66 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.29a gamma' at 2000-12-24 7:49:42
Testing server 'MySQL 3.23.37' at 2001-04-13 2:36:13
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_table (3): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 12 wallclock secs ( 1.45 usr 1.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to insert (31000): 13 wallclock secs ( 1.72 usr 1.20 sys + 0.00 cusr 0.00 csys = 2.92 CPU)
Time to delete_big (1): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 4 wallclock secs ( 1.80 usr 0.69 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for wisc_benchmark (114): 5 wallclock secs ( 2.23 usr 0.61 sys + 0.00 cusr 0.00 csys = 2.84 CPU)
Total time: 16 wallclock secs ( 3.25 usr 1.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 19 wallclock secs ( 3.95 usr 1.81 sys + 0.00 cusr 0.00 csys = 5.76 CPU)
Testing server 'MySQL 3.21.33' at 1998-08-21 14:42:57
Wisconsin benchmark test
Time for create_table (3): 0 secs ( 0.00 usr 0.00 sys = 0.00 cpu)
Inserting data
Time to insert (31000): 19 secs ( 1.36 usr 2.53 sys = 3.89 cpu)
Time to delete_big (1): 1 secs ( 0.00 usr 0.00 sys = 0.00 cpu)
Running actual benchmark
Time for wisc_benchmark (114): 7 secs ( 1.04 usr 2.86 sys = 3.90 cpu)
Total time: 27 secs ( 2.40 usr 5.39 sys = 7.79 cpu)
......@@ -195,7 +195,7 @@ sub new
$limits{'working_blobs'} = 0; # HEAP tables can't handle BLOB's
}
if (defined($main::opt_create_options) &&
$main::opt_create_options =~ /type=innobase/i)
$main::opt_create_options =~ /type=innodb/i)
{
$limits{'max_text_size'} = 8000; # Limit in Innobase
}
......
......@@ -21,7 +21,7 @@ MYSQLDATAdir = $(localstatedir)
MYSQLSHAREdir = $(pkgdatadir)
MYSQLBASEdir= $(prefix)
INCLUDES = @MT_INCLUDES@ \
@bdb_includes@ @innobase_includes@ @gemini_includes@ \
@bdb_includes@ @innodb_includes@ @gemini_includes@ \
-I$(srcdir)/../include \
-I$(srcdir)/../regex \
-I$(srcdir) -I../include -I.. -I.
......
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & Innobase Oy
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & InnoDB Oy
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -14,10 +14,10 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/* This file defines the Innobase handler: the interface between MySQL and
Innobase */
/* This file defines the InnoDB handler: the interface between MySQL and
InnoDB */
/* TODO list for the Innobase handler:
/* TODO list for the InnoDB handler:
- Ask Monty if strings of different languages can exist in the same
database. Answer: in near future yes, but not yet.
*/
......@@ -40,7 +40,7 @@ Innobase */
/* We must declare this here because we undef SAFE_MUTEX below */
pthread_mutex_t innobase_mutex;
/* Store MySQL definition of 'byte': in Linux it is char while Innobase
/* Store MySQL definition of 'byte': in Linux it is char while InnoDB
uses unsigned char */
typedef byte mysql_byte;
......@@ -50,7 +50,7 @@ typedef byte mysql_byte;
#define INSIDE_HA_INNOBASE_CC
/* Include necessary Innobase headers */
/* Include necessary InnoDB headers */
extern "C" {
#include "../innobase/include/univ.i"
#include "../innobase/include/srv0start.h"
......@@ -72,7 +72,7 @@ extern "C" {
#define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */
#define HA_INNOBASE_RANGE_COUNT 100
bool innobase_skip = 0;
bool innodb_skip = 0;
uint innobase_init_flags = 0;
ulong innobase_cache_size = 0;
......@@ -88,7 +88,7 @@ bool innobase_flush_log_at_trx_commit, innobase_log_archive,
/* innobase_data_file_path=ibdata:15,idata2:1,... */
/* The following counter is used to convey information to Innobase
/* The following counter is used to convey information to InnoDB
about server activity: in selects it is not sensible to call
srv_active_wake_master_thread after each fetch or search, we only do
it every INNOBASE_WAKE_INTERVAL'th step. */
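The throttling idea described in the comment above can be sketched on its own: count activity events and only notify the engine's utility thread every Nth step. A minimal sketch, assuming a hypothetical notify_engine_activity() stand-in for srv_active_wake_master_thread() and an illustrative interval of 32; none of these names are the real InnoDB API.
#include <cstdio>
// Hypothetical stand-in for srv_active_wake_master_thread(): in the real
// handler this nudges InnoDB's utility (master) thread.
static void notify_engine_activity() { std::printf("engine woken\n"); }
// Only forward every Nth activity event, mirroring the
// INNOBASE_WAKE_INTERVAL idea described in the handler comment.
static const unsigned WAKE_INTERVAL = 32;   // illustrative value
static unsigned activity_counter = 0;
static void record_activity()
{
    if (++activity_counter % WAKE_INTERVAL == 0)
        notify_engine_activity();           // cheap most of the time, wake rarely
}
int main()
{
    for (int i = 0; i < 100; i++)
        record_activity();                  // would print "engine woken" 3 times
    return 0;
}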
......@@ -128,13 +128,13 @@ innobase_active_small(void)
}
/************************************************************************
Converts an Innobase error code to a MySQL error code. */
Converts an InnoDB error code to a MySQL error code. */
static
int
convert_error_code_to_mysql(
/*========================*/
/* out: MySQL error code */
int error) /* in: Innobase error code */
int error) /* in: InnoDB error code */
{
if (error == DB_SUCCESS) {
......@@ -180,14 +180,14 @@ convert_error_code_to_mysql(
}
/*************************************************************************
Gets the Innobase transaction handle for a MySQL handler object, creates
an Innobase transaction struct if the corresponding MySQL thread struct still
Gets the InnoDB transaction handle for a MySQL handler object, creates
an InnoDB transaction struct if the corresponding MySQL thread struct still
lacks one. */
static
trx_t*
check_trx_exists(
/*=============*/
/* out: Innobase transaction handle */
/* out: InnoDB transaction handle */
THD* thd) /* in: user thread handle */
{
trx_t* trx;
......@@ -201,10 +201,10 @@ check_trx_exists(
thd->transaction.all.innobase_tid = trx;
/* The execution of a single SQL statement is denoted by
a 'transaction' handle which is a dummy pointer: Innobase
a 'transaction' handle which is a dummy pointer: InnoDB
remembers internally where the latest SQL statement
started, and if error handling requires rolling back the
latest statement, Innobase does a rollback to a savepoint. */
latest statement, InnoDB does a rollback to a savepoint. */
thd->transaction.stmt.innobase_tid =
(void*)&innodb_dummy_stmt_trx_handle;
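The implicit per-statement savepoint described above can be illustrated with a toy model: the transaction remembers where the current statement started, and a statement error rolls back only to that point. A minimal sketch with invented names (Transaction, mark_statement_start, rollback_to_statement_start); this is not the InnoDB trx interface.
#include <cstddef>
#include <string>
#include <vector>
// Toy model of the pattern: the engine remembers where the current SQL
// statement started inside the transaction's change log, so a statement
// error can be undone without rolling back the whole transaction.
class Transaction {
    std::vector<std::string> changes;   // all changes made in this transaction
    std::size_t stmt_start = 0;         // implicit savepoint
public:
    void mark_statement_start() { stmt_start = changes.size(); }
    void apply(const std::string& change) { changes.push_back(change); }
    // Undo only the latest statement (rollback to the implicit savepoint).
    void rollback_to_statement_start() { changes.resize(stmt_start); }
    std::size_t size() const { return changes.size(); }
};
int main()
{
    Transaction trx;
    trx.mark_statement_start();
    trx.apply("insert row 1");          // statement 1 succeeds
    trx.mark_statement_start();
    trx.apply("update row 1");
    trx.apply("update row 2");
    trx.rollback_to_statement_start();  // statement 2 failed: undo only it
    return trx.size() == 1 ? 0 : 1;     // the insert from statement 1 survives
}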
......@@ -214,7 +214,7 @@ check_trx_exists(
}
/*************************************************************************
Updates the user_thd field in a handle and also allocates a new Innobase
Updates the user_thd field in a handle and also allocates a new InnoDB
transaction handle if needed, and updates the transaction fields in the
prebuilt struct. */
inline
......@@ -418,7 +418,7 @@ innobase_parse_log_group_home_dirs(void)
}
/*************************************************************************
Opens an Innobase database. */
Opens an InnoDB database. */
bool
innobase_init(void)
......@@ -443,14 +443,14 @@ innobase_init(void)
current_dir[1]=FN_LIBCHAR;
current_dir[2]=0;
/* Set Innobase initialization parameters according to the values
/* Set InnoDB initialization parameters according to the values
read from MySQL .cnf file */
if (!innobase_data_file_path)
{
fprintf(stderr,
"Can't initialize Innobase as 'innobase_data_file_path' is not set\n");
innobase_skip=1;
"Can't initialize InnoDB as 'innobase_data_file_path' is not set\n");
innodb_skip=1;
DBUG_RETURN(FALSE); // Continue without innobase
}
......@@ -504,7 +504,7 @@ innobase_init(void)
}
/***********************************************************************
Closes an Innobase database. */
Closes an InnoDB database. */
bool
innobase_end(void)
......@@ -527,7 +527,7 @@ innobase_end(void)
}
/********************************************************************
Flushes Innobase logs to disk and makes a checkpoint. Really, a commit
Flushes InnoDB logs to disk and makes a checkpoint. Really, a commit
flushes logs, and the name of this function should be innobase_checkpoint. */
bool
......@@ -545,7 +545,7 @@ innobase_flush_logs(void)
}
/*************************************************************************
Gets the free space in an Innobase database: returned in units of kB. */
Gets the free space in an InnoDB database: returned in units of kB. */
uint
innobase_get_free_space(void)
......@@ -556,7 +556,7 @@ innobase_get_free_space(void)
}
/*********************************************************************
Commits a transaction in an Innobase database. */
Commits a transaction in an InnoDB database. */
int
innobase_commit(
......@@ -564,7 +564,7 @@ innobase_commit(
/* out: 0 or error number */
THD* thd, /* in: MySQL thread handle of the user for whom
the transaction should be committed */
void* trx_handle)/* in: Innobase trx handle or NULL: NULL means
void* trx_handle)/* in: InnoDB trx handle or NULL: NULL means
that the current SQL statement ended, and we should
mark the start of a new statement with a savepoint */
{
......@@ -588,7 +588,7 @@ innobase_commit(
DBUG_PRINT("error", ("error: %d", error));
}
#endif
/* Tell Innobase server that there might be work for
/* Tell InnoDB server that there might be work for
utility threads: */
srv_active_wake_master_thread();
......@@ -597,7 +597,7 @@ innobase_commit(
}
/*********************************************************************
Rolls back a transaction in an Innobase database. */
Rolls back a transaction in an InnoDB database. */
int
innobase_rollback(
......@@ -605,7 +605,7 @@ innobase_rollback(
/* out: 0 or error number */
THD* thd, /* in: handle to the MySQL thread of the user
whose transaction should be rolled back */
void* trx_handle)/* in: Innobase trx handle or a dummy stmt handle */
void* trx_handle)/* in: InnoDB trx handle or a dummy stmt handle */
{
int error = 0;
trx_t* trx;
......@@ -626,7 +626,7 @@ innobase_rollback(
}
/*********************************************************************
Frees a possible Innobase trx object associated with the current
Frees a possible InnoDB trx object associated with the current
THD. */
int
......@@ -660,7 +660,7 @@ innobase_print_error(
/*****************************************************************************
** Innobase database tables
** InnoDB database tables
*****************************************************************************/
/********************************************************************
......@@ -770,7 +770,7 @@ ha_innobase::open(
ref_length = buff_len;
/* Get pointer to a table object in Innobase dictionary cache */
/* Get pointer to a table object in InnoDB dictionary cache */
if (NULL == (ib_table = dict_table_get(norm_name, NULL))) {
......@@ -824,7 +824,7 @@ ha_innobase::initialize(void)
}
/**********************************************************************
Closes a handle to an Innobase table. */
Closes a handle to an InnoDB table. */
int
ha_innobase::close(void)
......@@ -838,7 +838,7 @@ ha_innobase::close(void)
my_free((char*) upd_buff, MYF(0));
free_share(share);
/* Tell Innobase server that there might be work for
/* Tell InnoDB server that there might be work for
utility threads: */
srv_active_wake_master_thread();
......@@ -924,9 +924,9 @@ reset_null_bits(
extern "C" {
/*****************************************************************
Innobase uses this function is to compare two data fields for which the
InnoDB uses this function to compare two data fields for which the
data type is such that we must use MySQL code to compare them. NOTE that the
prototype of this function is in rem0cmp.c in Innobase source code!
prototype of this function is in rem0cmp.c in InnoDB source code!
If you change this function, remember to update the prototype there! */
int
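The arrangement the comment describes, where the storage layer delegates comparison of certain column types to a function supplied by the SQL layer and both sides must agree on the prototype, is a plain callback pattern. A minimal sketch with invented names (field_cmp_fn, engine_compare, sql_layer_cmp); it is not the rem0cmp.c interface.
#include <cassert>
#include <cstring>
// Hypothetical comparison callback type: the "engine" side only sees raw
// bytes and lengths, the "SQL" side knows the collation rules.
typedef int (*field_cmp_fn)(const unsigned char* a, unsigned a_len,
                            const unsigned char* b, unsigned b_len);
// Engine side keeps a pointer it can call for types it cannot compare itself.
static field_cmp_fn engine_string_cmp = 0;
static int engine_compare(const unsigned char* a, unsigned a_len,
                          const unsigned char* b, unsigned b_len)
{
    assert(engine_string_cmp);          // must be installed before use
    return engine_string_cmp(a, a_len, b, b_len);
}
// SQL side: a trivial byte-wise comparison standing in for the real
// charset-aware comparison done in the handler.
static int sql_layer_cmp(const unsigned char* a, unsigned a_len,
                         const unsigned char* b, unsigned b_len)
{
    unsigned n = a_len < b_len ? a_len : b_len;
    int c = std::memcmp(a, b, n);
    if (c) return c;
    return (a_len > b_len) - (a_len < b_len);
}
int main()
{
    engine_string_cmp = sql_layer_cmp;  // both sides must agree on this signature
    const char x[] = "abc", y[] = "abd";
    return engine_compare(reinterpret_cast<const unsigned char*>(x), 3,
                          reinterpret_cast<const unsigned char*>(y), 3) < 0 ? 0 : 1;
}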
......@@ -972,7 +972,7 @@ innobase_mysql_cmp(
}
/******************************************************************
Converts a MySQL type to an Innobase type. */
Converts a MySQL type to an InnoDB type. */
inline
ulint
get_innobase_type_from_mysql_type(
......@@ -1231,7 +1231,7 @@ build_template(
}
/************************************************************************
Stores a row in an Innobase database, to the table specified in this
Stores a row in an InnoDB database, to the table specified in this
handle. */
int
......@@ -1295,7 +1295,7 @@ ha_innobase::write_row(
error = convert_error_code_to_mysql(error);
/* Tell Innobase server that there might be work for
/* Tell InnoDB server that there might be work for
utility threads: */
innobase_active_small();
......@@ -1304,7 +1304,7 @@ ha_innobase::write_row(
}
/******************************************************************
Converts field data for storage in an Innobase update vector. */
Converts field data for storage in an InnoDB update vector. */
inline
mysql_byte*
innobase_convert_and_store_changed_col(
......@@ -1315,7 +1315,7 @@ innobase_convert_and_store_changed_col(
mysql_byte* buf, /* in: buffer we can use in conversion */
mysql_byte* data, /* in: column data to store */
ulint len, /* in: data len */
ulint col_type,/* in: data type in Innobase type numbers */
ulint col_type,/* in: data type in InnoDB type numbers */
ulint is_unsigned)/* in: != 0 if an unsigned integer type */
{
uint i;
......@@ -1330,7 +1330,7 @@ innobase_convert_and_store_changed_col(
}
} else if (col_type == DATA_INT) {
/* Store integer data in Innobase in a big-endian
/* Store integer data in InnoDB in a big-endian
format, sign bit negated, if signed */
for (i = 0; i < len; i++) {
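The storage format mentioned in the comment, big-endian byte order with the sign bit flipped for signed integers, has the property that plain byte-wise comparison orders encoded values the same way as the integers themselves. A minimal sketch of such an encoding for a 32-bit value; it illustrates the described format in general and is not copied from the InnoDB sources.
#include <cstdint>
#include <cstring>
// Encode a signed 32-bit value as 4 bytes, most significant byte first,
// with the sign bit flipped, so that plain byte-wise comparison (memcmp)
// orders the encoded values the same way as the integers.
static void encode_int32(int32_t v, unsigned char out[4])
{
    uint32_t u = static_cast<uint32_t>(v) ^ 0x80000000u;   // negate sign bit
    out[0] = static_cast<unsigned char>(u >> 24);           // big-endian
    out[1] = static_cast<unsigned char>(u >> 16);
    out[2] = static_cast<unsigned char>(u >> 8);
    out[3] = static_cast<unsigned char>(u);
}
int main()
{
    unsigned char a[4], b[4];
    encode_int32(-5, a);
    encode_int32(7, b);
    // -5 < 7, and the encoded forms compare the same way:
    return std::memcmp(a, b, 4) < 0 ? 0 : 1;
}
Because byte order then matches numeric order, index key bytes can be compared directly without decoding them first.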
......@@ -1365,7 +1365,7 @@ calc_row_difference(
mysql_byte* new_row, /* in: new row in MySQL format */
struct st_table* table, /* in: table in MySQL data dictionary */
mysql_byte* upd_buff, /* in: buffer to use */
row_prebuilt_t* prebuilt, /* in: Innobase prebuilt struct */
row_prebuilt_t* prebuilt, /* in: InnoDB prebuilt struct */
THD* thd) /* in: user thread */
{
Field* field;
......@@ -1460,7 +1460,7 @@ calc_row_difference(
Updates a row given as a parameter to a new value. Note that we are given
whole rows, not just the fields which are updated: this incurs some
overhead for CPU when we check which fields are actually updated.
TODO: currently Innobase does not prevent the 'Halloween problem':
TODO: currently InnoDB does not prevent the 'Halloween problem':
in a searched update a single row can get updated several times
if its index columns are updated! */
......@@ -1504,7 +1504,7 @@ ha_innobase::update_row(
error = convert_error_code_to_mysql(error);
/* Tell Innobase server that there might be work for
/* Tell InnoDB server that there might be work for
utility threads: */
innobase_active_small();
......@@ -1544,7 +1544,7 @@ ha_innobase::delete_row(
error = convert_error_code_to_mysql(error);
/* Tell the Innobase server that there might be work for
/* Tell the InnoDB server that there might be work for
utility threads: */
innobase_active_small();
......@@ -1584,7 +1584,7 @@ ha_innobase::index_end(void)
/*************************************************************************
Converts a search mode flag understood by MySQL to a flag understood
by Innobase. */
by InnoDB. */
inline
ulint
convert_search_mode_to_innobase(
......@@ -1710,7 +1710,7 @@ ha_innobase::change_active_index(
/* out: 0 or error code */
uint keynr) /* in: use this index; MAX_KEY means always clustered
index, even if it was internally generated by
Innobase */
InnoDB */
{
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
KEY* key;
......@@ -2011,7 +2011,7 @@ ha_innobase::rnd_pos(
Stores a reference to the current row to 'ref' field of the handle. Note
that the function parameter is illogical: we must assume that 'record'
is the current 'position' of the handle, because if row ref is actually
the row id internally generated in Innobase, then 'record' does not contain
the row id internally generated in InnoDB, then 'record' does not contain
it. We just guess that the row id must be for the record where the handle
was positioned the last time. */
......@@ -2073,7 +2073,7 @@ int ha_innobase::reset(void)
As MySQL will execute an external lock for every new table it uses when it
starts to process an SQL statement, we can use this function to store the
pointer to the THD in the handle. We will also use this function to communicate
to Innobase that a new SQL statement has started and that we must store a
to InnoDB that a new SQL statement has started and that we must store a
savepoint to our transaction handle, so that we are able to roll back
the SQL statement in case of an error. */
......@@ -2122,12 +2122,12 @@ ha_innobase::external_lock(
}
/*********************************************************************
Creates a table definition to an Innobase database. */
Creates a table definition to an InnoDB database. */
static
int
create_table_def(
/*=============*/
trx_t* trx, /* in: Innobase transaction handle */
trx_t* trx, /* in: InnoDB transaction handle */
TABLE* form, /* in: information on table
columns and indexes */
const char* table_name) /* in: table name */
......@@ -2181,12 +2181,12 @@ create_table_def(
}
/*********************************************************************
Creates an index in an Innobase database. */
Creates an index in an InnoDB database. */
static
int
create_index(
/*=========*/
trx_t* trx, /* in: Innobase transaction handle */
trx_t* trx, /* in: InnoDB transaction handle */
TABLE* form, /* in: information on table
columns and indexes */
const char* table_name, /* in: table name */
......@@ -2216,7 +2216,7 @@ create_index(
ind_type = ind_type | DICT_UNIQUE;
}
/* The '0' below specifies that everything in Innobase is currently
/* The '0' below specifies that everything in InnoDB is currently
created in tablespace 0 */
index = dict_mem_index_create((char*) table_name, key->name, 0,
......@@ -2238,19 +2238,19 @@ create_index(
}
/*********************************************************************
Creates an index to an Innobase table when the user has defined no
Creates an index to an InnoDB table when the user has defined no
primary index. */
static
int
create_clustered_index_when_no_primary(
/*===================================*/
trx_t* trx, /* in: Innobase transaction handle */
trx_t* trx, /* in: InnoDB transaction handle */
const char* table_name) /* in: table name */
{
dict_index_t* index;
int error;
/* The first '0' below specifies that everything in Innobase is
/* The first '0' below specifies that everything in InnoDB is
currently created in file space 0 */
index = dict_mem_index_create((char*) table_name,
......@@ -2264,7 +2264,7 @@ create_clustered_index_when_no_primary(
}
/*********************************************************************
Creates a new table to an Innobase database. */
Creates a new table to an InnoDB database. */
int
ha_innobase::create(
......@@ -2292,7 +2292,7 @@ ha_innobase::create(
normalize_table_name(norm_name, name2);
/* Create the table definition in Innobase */
/* Create the table definition in InnoDB */
if ((error = create_table_def(trx, form, norm_name))) {
......@@ -2323,7 +2323,7 @@ ha_innobase::create(
if (form->keys == 0 || primary_key_no == -1) {
/* Create an index which is used as the clustered index;
order the rows by their row id which is internally generated
by Innobase */
by InnoDB */
error = create_clustered_index_when_no_primary(trx,
norm_name);
......@@ -2337,7 +2337,7 @@ ha_innobase::create(
}
if (primary_key_no != -1) {
/* In Innobase the clustered index must always be created
/* In InnoDB the clustered index must always be created
first */
if ((error = create_index(trx, form, norm_name,
(uint) primary_key_no))) {
......@@ -2370,7 +2370,7 @@ ha_innobase::create(
assert(innobase_table);
/* Tell the Innobase server that there might be work for
/* Tell the InnoDB server that there might be work for
utility threads: */
srv_active_wake_master_thread();
......@@ -2381,10 +2381,10 @@ ha_innobase::create(
}
/*********************************************************************
Drops a table from an Innobase database. Before calling this function,
Drops a table from an InnoDB database. Before calling this function,
MySQL calls innobase_commit to commit the transaction of the current user.
Then the current user cannot have locks set on the table. Drop table
operation inside Innobase will wait sleeping in a loop until no other
operation inside InnoDB will wait sleeping in a loop until no other
user has locks on the table. */
int
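The "wait sleeping in a loop until no other user has locks on the table" behaviour described above amounts to polling a lock count with a short sleep between checks. A minimal sketch, assuming a hypothetical shared lock counter; this is not the row_drop_table_for_mysql implementation.
#include <atomic>
#include <chrono>
#include <thread>
// Hypothetical shared counter of user locks on the table to be dropped.
static std::atomic<int> locks_on_table(2);
// Sketch of the "sleep in a loop until nobody holds a lock" behaviour
// the handler comment describes for DROP TABLE.
static void drop_table_when_unlocked()
{
    while (locks_on_table.load() > 0)
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    // ... at this point the drop itself would proceed ...
}
int main()
{
    std::thread dropper(drop_table_when_unlocked);
    // Other sessions release their locks after a while.
    std::this_thread::sleep_for(std::chrono::milliseconds(30));
    locks_on_table = 0;
    dropper.join();
    return 0;
}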
......@@ -2411,11 +2411,11 @@ ha_innobase::delete_table(
normalize_table_name(norm_name, name);
/* Drop the table in Innobase */
/* Drop the table in InnoDB */
error = row_drop_table_for_mysql(norm_name, trx, FALSE);
/* Tell the Innobase server that there might be work for
/* Tell the InnoDB server that there might be work for
utility threads: */
srv_active_wake_master_thread();
......@@ -2428,7 +2428,7 @@ ha_innobase::delete_table(
}
/*************************************************************************
Renames an Innobase table. */
Renames an InnoDB table. */
int
ha_innobase::rename_table(
......@@ -2457,11 +2457,11 @@ ha_innobase::rename_table(
normalize_table_name(norm_from, from);
normalize_table_name(norm_to, to);
/* Rename the table in Innobase */
/* Rename the table in InnoDB */
error = row_rename_table_for_mysql(norm_from, norm_to, trx);
/* Tell the Innobase server that there might be work for
/* Tell the InnoDB server that there might be work for
utility threads: */
srv_active_wake_master_thread();
......@@ -2630,7 +2630,7 @@ ha_innobase::info(
}
}
/* The trx struct in Innobase contains a pthread mutex embedded:
/* The trx struct in InnoDB contains a pthread mutex embedded:
in the debug version of MySQL it is replaced by a 'safe mutex'
which is of a different size. We have to use a function to access
trx fields. Otherwise trx->error_info will be a random
......@@ -2646,7 +2646,7 @@ ha_innobase::info(
}
/*****************************************************************
Adds information about free space in the Innobase tablespace to a
Adds information about free space in the InnoDB tablespace to a
table comment which is printed out when a user calls SHOW TABLE STATUS. */
char*
......@@ -2668,7 +2668,7 @@ ha_innobase::update_table_comment(
*pos++=';';
*pos++=' ';
}
sprintf(pos, "Innobase free: %lu kB", (ulong) innobase_get_free_space());
sprintf(pos, "InnoDB free: %lu kB", (ulong) innobase_get_free_space());
return(str);
}
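The earlier comment in ha_innobase::info about the embedded pthread mutex is really a struct-layout point: when a debug build substitutes a 'safe mutex' of a different size, the offsets of the fields that follow it change, so code built against the other definition must read those fields through an accessor compiled next to the real struct. A minimal sketch with invented types (plain_mutex, safe_mutex, trx_sketch); it only illustrates the layout hazard, not the actual trx struct.
// Minimal sketch of why a differently-sized embedded member breaks direct
// field access across compilation units, and why an accessor compiled
// together with the struct definition is safe. Types are invented.
struct plain_mutex { long word; };                                    // "release" mutex
struct safe_mutex  { long word; long owner; long file; long line; };  // "debug" mutex
// The owning side (think: InnoDB, compiled with one definition):
struct trx_sketch {
    safe_mutex mutex;       // size differs between the two builds
    int        error_info;  // offset of this field therefore differs too
};
// Safe: compiled in the same unit as the real definition, so the offset
// of error_info is always right.
int trx_get_error_info(const trx_sketch* t) { return t->error_info; }
int main()
{
    trx_sketch t;
    t.error_info = 42;
    // A caller built against a *different* mutex definition would compute a
    // wrong offset for t.error_info; calling the accessor avoids that.
    return trx_get_error_info(&t) == 42 ? 0 : 1;
}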
......
......@@ -149,8 +149,8 @@ class ha_innobase: public handler
enum thr_lock_type lock_type);
};
extern bool innobase_skip;
extern SHOW_COMP_OPTION have_innobase;
extern bool innodb_skip;
extern SHOW_COMP_OPTION have_innodb;
extern uint innobase_init_flags, innobase_lock_type;
extern ulong innobase_cache_size;
extern char *innobase_home, *innobase_tmpdir, *innobase_logdir;
......
......@@ -79,7 +79,7 @@ enum db_type ha_checktype(enum db_type database_type)
#endif
#ifdef HAVE_INNOBASE_DB
case DB_TYPE_INNOBASE:
return(innobase_skip ? DB_TYPE_MYISAM : database_type);
return(innodb_skip ? DB_TYPE_MYISAM : database_type);
#endif
#ifdef HAVE_GEMINI_DB
case DB_TYPE_GEMINI:
......@@ -156,14 +156,14 @@ int ha_init()
}
#endif
#ifdef HAVE_INNOBASE_DB
if (!innobase_skip)
if (!innodb_skip)
{
if (innobase_init())
return -1;
if (!innobase_skip) // If we couldn't use handler
if (!innodb_skip) // If we couldn't use handler
opt_using_transactions=1;
else
have_innobase=SHOW_OPTION_DISABLED;
have_innodb=SHOW_OPTION_DISABLED;
}
#endif
#ifdef HAVE_GEMINI_DB
......@@ -201,7 +201,7 @@ int ha_panic(enum ha_panic_function flag)
error|=berkeley_end();
#endif
#ifdef HAVE_INNOBASE_DB
if (!innobase_skip)
if (!innodb_skip)
error|=innobase_end();
#endif
#ifdef HAVE_GEMINI_DB
......@@ -215,7 +215,7 @@ int ha_panic(enum ha_panic_function flag)
void ha_close_connection(THD* thd)
{
#ifdef HAVE_INNOBASE_DB
if (!innobase_skip)
if (!innodb_skip)
innobase_close_connection(thd);
#endif
#ifdef HAVE_GEMINI_DB
......@@ -380,7 +380,7 @@ bool ha_flush_logs()
result=1;
#endif
#ifdef HAVE_INNOBASE_DB
if (!innobase_skip && innobase_flush_logs())
if (!innodb_skip && innobase_flush_logs())
result=1;
#endif
return result;
......
......@@ -165,9 +165,9 @@ SHOW_COMP_OPTION have_gemini=SHOW_OPTION_YES;
SHOW_COMP_OPTION have_gemini=SHOW_OPTION_NO;
#endif
#ifdef HAVE_INNOBASE_DB
SHOW_COMP_OPTION have_innobase=SHOW_OPTION_YES;
SHOW_COMP_OPTION have_innodb=SHOW_OPTION_YES;
#else
SHOW_COMP_OPTION have_innobase=SHOW_OPTION_NO;
SHOW_COMP_OPTION have_innodb=SHOW_OPTION_NO;
#endif
#ifndef NO_ISAM
SHOW_COMP_OPTION have_isam=SHOW_OPTION_YES;
......@@ -2481,14 +2481,14 @@ enum options {
OPT_REPLICATE_WILD_IGNORE_TABLE,
OPT_DISCONNECT_SLAVE_EVENT_COUNT,
OPT_ABORT_SLAVE_EVENT_COUNT,
OPT_INNOBASE_DATA_HOME_DIR,
OPT_INNOBASE_DATA_FILE_PATH,
OPT_INNOBASE_LOG_GROUP_HOME_DIR,
OPT_INNOBASE_LOG_ARCH_DIR,
OPT_INNOBASE_LOG_ARCHIVE,
OPT_INNOBASE_FLUSH_LOG_AT_TRX_COMMIT,
OPT_INNODB_DATA_HOME_DIR,
OPT_INNODB_DATA_FILE_PATH,
OPT_INNODB_LOG_GROUP_HOME_DIR,
OPT_INNODB_LOG_ARCH_DIR,
OPT_INNODB_LOG_ARCHIVE,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT,
OPT_SAFE_SHOW_DB,
OPT_GEMINI_SKIP, OPT_INNOBASE_SKIP,
OPT_GEMINI_SKIP, OPT_INNODB_SKIP,
OPT_TEMP_POOL, OPT_DO_PSTACK, OPT_TX_ISOLATION,
OPT_GEMINI_FLUSH_LOG, OPT_GEMINI_RECOVER,
OPT_GEMINI_UNBUFFERED_IO, OPT_SKIP_SAFEMALLOC,
......@@ -2537,19 +2537,19 @@ static struct option long_options[] = {
#endif
/* We must always support this option to make scripts like mysqltest easier
to do */
{"innobase_data_file_path", required_argument, 0,
OPT_INNOBASE_DATA_FILE_PATH},
{"innodb_data_file_path", required_argument, 0,
OPT_INNODB_DATA_FILE_PATH},
#ifdef HAVE_INNOBASE_DB
{"innobase_data_home_dir", required_argument, 0,
OPT_INNOBASE_DATA_HOME_DIR},
{"innobase_log_group_home_dir", required_argument, 0,
OPT_INNOBASE_LOG_GROUP_HOME_DIR},
{"innobase_log_arch_dir", required_argument, 0,
OPT_INNOBASE_LOG_ARCH_DIR},
{"innobase_log_archive", optional_argument, 0,
OPT_INNOBASE_LOG_ARCHIVE},
{"innobase_flush_log_at_trx_commit", optional_argument, 0,
OPT_INNOBASE_FLUSH_LOG_AT_TRX_COMMIT},
{"innodb_data_home_dir", required_argument, 0,
OPT_INNODB_DATA_HOME_DIR},
{"innodb_log_group_home_dir", required_argument, 0,
OPT_INNODB_LOG_GROUP_HOME_DIR},
{"innodb_log_arch_dir", required_argument, 0,
OPT_INNODB_LOG_ARCH_DIR},
{"innodb_log_archive", optional_argument, 0,
OPT_INNODB_LOG_ARCHIVE},
{"innodb_flush_log_at_trx_commit", optional_argument, 0,
OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT},
#endif
{"help", no_argument, 0, '?'},
{"init-file", required_argument, 0, (int) OPT_INIT_FILE},
......@@ -2607,7 +2607,7 @@ static struct option long_options[] = {
{"server-id", required_argument, 0, (int) OPT_SERVER_ID},
{"set-variable", required_argument, 0, 'O'},
{"skip-bdb", no_argument, 0, (int) OPT_BDB_SKIP},
{"skip-innobase", no_argument, 0, (int) OPT_INNOBASE_SKIP},
{"skip-innodb", no_argument, 0, (int) OPT_INNODB_SKIP},
{"skip-gemini", no_argument, 0, (int) OPT_GEMINI_SKIP},
{"skip-concurrent-insert", no_argument, 0, (int) OPT_SKIP_CONCURRENT_INSERT},
{"skip-delay-key-write", no_argument, 0, (int) OPT_SKIP_DELAY_KEY_WRITE},
......@@ -2681,25 +2681,25 @@ CHANGEABLE_VAR changeable_vars[] = {
1, 0, LONG_MAX, 0, 1 },
#endif
#ifdef HAVE_INNOBASE_DB
{"innobase_mirrored_log_groups",
{"innodb_mirrored_log_groups",
(long*) &innobase_mirrored_log_groups, 1, 1, 10, 0, 1},
{"innobase_log_files_in_group",
{"innodb_log_files_in_group",
(long*) &innobase_log_files_in_group, 2, 2, 100, 0, 1},
{"innobase_log_file_size",
{"innodb_log_file_size",
(long*) &innobase_log_file_size, 5*1024*1024L, 1*1024*1024L,
~0L, 0, 1024*1024L},
{"innobase_log_buffer_size",
{"innodb_log_buffer_size",
(long*) &innobase_log_buffer_size, 1024*1024L, 256*1024L,
~0L, 0, 1024},
{"innobase_buffer_pool_size",
{"innodb_buffer_pool_size",
(long*) &innobase_buffer_pool_size, 8*1024*1024L, 1024*1024L,
~0L, 0, 1024*1024L},
{"innobase_additional_mem_pool_size",
{"innodb_additional_mem_pool_size",
(long*) &innobase_additional_mem_pool_size, 1*1024*1024L, 512*1024L,
~0L, 0, 1024},
{"innobase_file_io_threads",
{"innodb_file_io_threads",
(long*) &innobase_file_io_threads, 9, 4, 64, 0, 1},
{"innobase_lock_wait_timeout",
{"innodb_lock_wait_timeout",
(long*) &innobase_lock_wait_timeout, 1024 * 1024 * 1024, 1,
1024 * 1024 * 1024, 0, 1},
#endif
......@@ -2816,18 +2816,18 @@ struct show_var_st init_vars[]= {
#endif
{"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
{"have_gemini", (char*) &have_gemini, SHOW_HAVE},
{"have_innobase", (char*) &have_innobase, SHOW_HAVE},
{"have_innodb", (char*) &have_innodb, SHOW_HAVE},
{"have_isam", (char*) &have_isam, SHOW_HAVE},
{"have_raid", (char*) &have_raid, SHOW_HAVE},
{"have_ssl", (char*) &have_ssl, SHOW_HAVE},
{"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR},
#ifdef HAVE_INNOBASE_DB
{"innobase_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR},
{"innobase_data_home_dir", (char*) &innobase_data_home_dir, SHOW_CHAR_PTR},
{"innobase_flush_log_at_trx_commit", (char*) &innobase_flush_log_at_trx_commit, SHOW_MY_BOOL},
{"innobase_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR},
{"innobase_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL},
{"innobase_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
{"innodb_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR},
{"innodb_data_home_dir", (char*) &innobase_data_home_dir, SHOW_CHAR_PTR},
{"innodb_flush_log_at_trx_commit", (char*) &innobase_flush_log_at_trx_commit, SHOW_MY_BOOL},
{"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR},
{"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL},
{"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
#endif
{"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG},
{"join_buffer_size", (char*) &join_buff_size, SHOW_LONG},
......@@ -3097,14 +3097,14 @@ static void usage(void)
#endif
#ifdef HAVE_INNOBASE_DB
puts("\
--innobase_data_home_dir=dir The common part for innobase table spaces\n\
--innobase_data_file_path=dir Path to individual files and their sizes\n\
--innobase_flush_log_at_trx_commit[=#]\n\
--innodb_data_home_dir=dir The common part for Innodb table spaces\n\
--innodb_data_file_path=dir Path to individual files and their sizes\n\
--innodb_flush_log_at_trx_commit[=#]\n\
Set to 0 if you don't want to flush logs\n\
--innobase_log_arch_dir=dir Where full logs should be archived\n\
--innobase_log_archive[=#] Set to 1 if you want to have logs archived\n\
--innobase_log_group_home_dir=dir Path to Innobase log files.\n\
--skip-innobase Don't use innobase (will save memory)\n\
--innodb_log_arch_dir=dir Where full logs should be archived\n\
--innodb_log_archive[=#] Set to 1 if you want to have logs archived\n\
--innodb_log_group_home_dir=dir Path to innodb log files.\n\
--skip-innodb Don't use Innodb (will save memory)\n\
");
#endif /* HAVE_INNOBASE_DB */
print_defaults("my",load_default_groups);
......@@ -3657,31 +3657,31 @@ static void get_options(int argc,char **argv)
gemini_options |= GEMOPT_UNBUFFERED_IO;
#endif
break;
case OPT_INNOBASE_SKIP:
case OPT_INNODB_SKIP:
#ifdef HAVE_INNOBASE_DB
innobase_skip=1;
have_innobase=SHOW_OPTION_DISABLED;
innodb_skip=1;
have_innodb=SHOW_OPTION_DISABLED;
#endif
break;
case OPT_INNOBASE_DATA_FILE_PATH:
case OPT_INNODB_DATA_FILE_PATH:
#ifdef HAVE_INNOBASE_DB
innobase_data_file_path=optarg;
#endif
break;
#ifdef HAVE_INNOBASE_DB
case OPT_INNOBASE_DATA_HOME_DIR:
case OPT_INNODB_DATA_HOME_DIR:
innobase_data_home_dir=optarg;
break;
case OPT_INNOBASE_LOG_GROUP_HOME_DIR:
case OPT_INNODB_LOG_GROUP_HOME_DIR:
innobase_log_group_home_dir=optarg;
break;
case OPT_INNOBASE_LOG_ARCH_DIR:
case OPT_INNODB_LOG_ARCH_DIR:
innobase_log_arch_dir=optarg;
break;
case OPT_INNOBASE_LOG_ARCHIVE:
case OPT_INNODB_LOG_ARCHIVE:
innobase_log_archive= optarg ? test(atoi(optarg)) : 1;
break;
case OPT_INNOBASE_FLUSH_LOG_AT_TRX_COMMIT:
case OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT:
innobase_flush_log_at_trx_commit= optarg ? test(atoi(optarg)) : 1;
break;
#endif /* HAVE_INNOBASE_DB */
......
......@@ -150,8 +150,8 @@ int mysql_delete(THD *thd,
(OPTION_NOT_AUTO_COMMIT | OPTION_BEGIN)));
#ifdef HAVE_INNOBASE_DB
/* We need to add code to not generate table based on the table type */
if (!innobase_skip)
use_generate_table=0; // Innobase can't use re-generate table
if (!innodb_skip)
use_generate_table=0; // Innodb can't use re-generate table
#endif
if (use_generate_table && ! thd->open_tables)
{
......
......@@ -47,20 +47,20 @@ server-id = 1
#set-variable = bdb_max_lock=100000
# Uncomment the following if you are using Innobase tables
#innobase_data_home_dir = @localstatedir@/
#innobase_log_group_home_dir = @localstatedir@/
#innobase_log_arch_dir = @localstatedir@/
#innobase_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innobase_mirrored_log_groups=1
#set-variable = innobase_log_files_in_group=3
#set-variable = innobase_log_file_size=5M
#set-variable = innobase_log_buffer_size=8M
#innobase_flush_log_at_trx_commit=1
#innobase_log_archive=0
#set-variable = innobase_buffer_pool_size=16M
#set-variable = innobase_additional_mem_pool_size=2M
#set-variable = innobase_file_io_threads=4
#set-variable = innobase_lock_wait_timeout=50
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M
#set-variable = innodb_log_buffer_size=8M
#innodb_flush_log_at_trx_commit=1
#innodb_log_archive=0
#set-variable = innodb_buffer_pool_size=16M
#set-variable = innodb_additional_mem_pool_size=2M
#set-variable = innodb_file_io_threads=4
#set-variable = innodb_lock_wait_timeout=50
[mysqldump]
quick
......
......@@ -43,20 +43,20 @@ server-id = 1
#set-variable = bdb_max_lock=100000
# Uncomment the following if you are using Innobase tables
#innobase_data_home_dir = @localstatedir@/
#innobase_log_group_home_dir = @localstatedir@/
#innobase_log_arch_dir = @localstatedir@/
#innobase_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innobase_mirrored_log_groups=1
#set-variable = innobase_log_files_in_group=3
#set-variable = innobase_log_file_size=5M
#set-variable = innobase_log_buffer_size=8M
#innobase_flush_log_at_trx_commit=1
#innobase_log_archive=0
#set-variable = innobase_buffer_pool_size=16M
#set-variable = innobase_additional_mem_pool_size=2M
#set-variable = innobase_file_io_threads=4
#set-variable = innobase_lock_wait_timeout=50
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M
#set-variable = innodb_log_buffer_size=8M
#innodb_flush_log_at_trx_commit=1
#innodb_log_archive=0
#set-variable = innodb_buffer_pool_size=16M
#set-variable = innodb_additional_mem_pool_size=2M
#set-variable = innodb_file_io_threads=4
#set-variable = innodb_lock_wait_timeout=50
# Point the following paths to different dedicated disks
#tmpdir = /tmp/
......
......@@ -45,20 +45,20 @@ server-id = 1
#set-variable = bdb_max_lock=10000
# Uncomment the following if you are using Innobase tables
#innobase_data_home_dir = @localstatedir@/
#innobase_log_group_home_dir = @localstatedir@/
#innobase_log_arch_dir = @localstatedir@/
#innobase_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innobase_mirrored_log_groups=1
#set-variable = innobase_log_files_in_group=3
#set-variable = innobase_log_file_size=5M
#set-variable = innobase_log_buffer_size=8M
#innobase_flush_log_at_trx_commit=1
#innobase_log_archive=0
#set-variable = innobase_buffer_pool_size=16M
#set-variable = innobase_additional_mem_pool_size=2M
#set-variable = innobase_file_io_threads=4
#set-variable = innobase_lock_wait_timeout=50
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M
#set-variable = innodb_log_buffer_size=8M
#innodb_flush_log_at_trx_commit=1
#innodb_log_archive=0
#set-variable = innodb_buffer_pool_size=16M
#set-variable = innodb_additional_mem_pool_size=2M
#set-variable = innodb_file_io_threads=4
#set-variable = innodb_lock_wait_timeout=50
[mysqldump]
quick
......
......@@ -42,20 +42,20 @@ server-id = 1
#skip-bdb
# Uncomment the following if you are using Innobase tables
#innobase_data_home_dir = @localstatedir@/
#innobase_log_group_home_dir = @localstatedir@/
#innobase_log_arch_dir = @localstatedir@/
#innobase_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innobase_mirrored_log_groups=1
#set-variable = innobase_log_files_in_group=3
#set-variable = innobase_log_file_size=5M
#set-variable = innobase_log_buffer_size=8M
#innobase_flush_log_at_trx_commit=1
#innobase_log_archive=0
#set-variable = innobase_buffer_pool_size=16M
#set-variable = innobase_additional_mem_pool_size=2M
#set-variable = innobase_file_io_threads=4
#set-variable = innobase_lock_wait_timeout=50
#innodb_data_home_dir = @localstatedir@/
#innodb_log_group_home_dir = @localstatedir@/
#innodb_log_arch_dir = @localstatedir@/
#innodb_data_file_path = ibdata1:25M;ibdata2:37M;ibdata3:100M;ibdata4:300M
#set-variable = innodb_mirrored_log_groups=1
#set-variable = innodb_log_files_in_group=3
#set-variable = innodb_log_file_size=5M
#set-variable = innodb_log_buffer_size=8M
#innodb_flush_log_at_trx_commit=1
#innodb_log_archive=0
#set-variable = innodb_buffer_pool_size=16M
#set-variable = innodb_additional_mem_pool_size=2M
#set-variable = innodb_file_io_threads=4
#set-variable = innodb_lock_wait_timeout=50
[mysqldump]
quick
......
......@@ -22,7 +22,7 @@ Obsoletes: mysql
# Think about what you use here since the first step is to
# run a rm -rf
BuildRoot: /var/tmp/mysql
BuildRoot: /var/tmp/mysql-max
# From the manual
%description
......@@ -72,7 +72,7 @@ para maiores informa
%prep
%setup -n mysql-max-%{mysql_version}
# %setup -T -D -a 1 -n mysql-%{mysql_version}
# %setup -T -D -a 1 -n mysql-max-%{mysql_version}
%build
# The all-static flag is to make the RPM work on different
......@@ -105,7 +105,7 @@ sh -c "PATH=\"${MYSQL_BUILD_PATH:-/bin:/usr/bin}\" \
--includedir=/usr/include \
--mandir=/usr/man \
--with-berkeley-db \
--with-innobase \
--with-innodb \
--with-comment=\"Official MySQL-Max RPM\";
# Add this for more debugging support
# --with-debug
......@@ -118,7 +118,7 @@ sh -c "PATH=\"${MYSQL_BUILD_PATH:-/bin:/usr/bin}\" \
# Use the build root for temporary storage of the shared libraries.
RBR=$RPM_BUILD_ROOT
MBD=$RPM_BUILD_DIR/mysql-%{mysql_version}
MBD=$RPM_BUILD_DIR/mysql-max-%{mysql_version}
if test -z "$RBR" -o "$RBR" = "/"
then
echo "RPM_BUILD_ROOT has stupid value"
......@@ -140,7 +140,7 @@ BuildMySQL "--disable-shared" \
%install -n mysql-max-%{mysql_version}
RBR=$RPM_BUILD_ROOT
MBD=$RPM_BUILD_DIR/mysql-%{mysql_version}
MBD=$RPM_BUILD_DIR/mysql-max-%{mysql_version}
# Ensure that needed directories exists
install -d $RBR/etc/{logrotate.d,rc.d/init.d}
install -d $RBR/var/lib/mysql/mysql
......@@ -157,7 +157,7 @@ install -m644 $MBD/support-files/mysql-log-rotate $RBR/etc/logrotate.d/mysql
install -m755 $MBD/support-files/mysql.server $RBR/etc/rc.d/init.d/mysql
# Install docs
install -m644 $RPM_BUILD_DIR/mysql-%{mysql_version}/Docs/mysql.info \
install -m644 $RPM_BUILD_DIR/mysql-max-%{mysql_version}/Docs/mysql.info \
$RBR/usr/info/mysql.info
for file in README COPYING COPYING.LIB Docs/manual_toc.html Docs/manual.html \
Docs/manual.txt Docs/manual.texi Docs/manual.ps \
......@@ -259,5 +259,5 @@ fi
%changelog
* 2000-04-01 Monty
* Fri Apr 13 2001 Monty
First version of mysql-max.spec.sh based on mysql.spec.sh
......@@ -127,6 +127,17 @@ Group: Applications/Databases
This package contains the shared libraries (*.so*) which certain
languages and applications need to dynamically load and use MySQL.
%package Max
Release: %{release}
Summary: MySQL - server with Berkeley DB and Innodb support
Group: Applications/Databases
Obsoletes: mysql-Max
%description Max
Extra MySQL server binary to get support for extra features like
transactional tables. To activate these features you only have to install
this package after the server package.
%prep
%setup -n mysql-%{mysql_version}
......@@ -160,8 +171,6 @@ sh -c "PATH=\"${MYSQL_BUILD_PATH:-/bin:/usr/bin}\" \
--infodir=/usr/info \
--includedir=/usr/include \
--mandir=/usr/man \
--without-berkeley-db \
--without-innobase \
--with-comment=\"Official MySQL RPM\";
# Add this for more debugging support
# --with-debug
......@@ -185,12 +194,15 @@ fi
rm -rf $RBR
mkdir -p $RBR
BuildMySQL "--enable-shared --enable-thread-safe-client --without-server"
# Build the shared libraries and mysqld-max
BuildMySQL "--enable-shared --enable-thread-safe-client --with-berkeley-db --with-innodb --with-mysqld-ldflags='-all-static' --with-server-suffix='-Max'"
# Save everything for debus
tar cf $RBR/all.tar .
# Save everything for debug
# tar cf $RBR/all.tar .
# Save shared libraries
# Save shared libraries and mysqld-max
mv sql/mysqld sql/mysqld-max
(cd libmysql/.libs; tar cf $RBR/shared-libs.tar *.so*)
(cd libmysql_r/.libs; tar rf $RBR/shared-libs.tar *.so*)
......@@ -201,7 +213,8 @@ mv Docs/manual.ps.save Docs/manual.ps
BuildMySQL "--disable-shared" \
"--with-mysqld-ldflags='-all-static'" \
"--with-client-ldflags='-all-static'"
"--with-client-ldflags='-all-static'" \
"--without-berkeley-db --without-innodb"
%install -n mysql-%{mysql_version}
RBR=$RPM_BUILD_ROOT
......@@ -220,6 +233,9 @@ make install DESTDIR=$RBR benchdir_root=/usr/share/
# Install shared libraries (Disable for architectures that don't support it)
(cd $RBR/usr/lib; tar xf $RBR/shared-libs.tar)
# install saved mysqld-max
install -m755 $MBD/sql/mysqld-max $RBR/usr/sbin/mysqld-max
# Install logrotate and autostart
install -m644 $MBD/support-files/mysql-log-rotate $RBR/etc/logrotate.d/mysql
install -m755 $MBD/support-files/mysql.server $RBR/etc/rc.d/init.d/mysql
......@@ -370,8 +386,15 @@ fi
%attr(-, root, root) /usr/share/sql-bench
%attr(-, root, root) /usr/share/mysql-test
%files Max
%attr(755, root, root) /usr/sbin/mysqld-max
%changelog
* Fri Apr 13 2001 Monty
- Added mysqld-max to the distribution
* Tue Jan 2 2001 Monty
- Added mysql-test to the bench package
......
......@@ -17,9 +17,9 @@ package main;
$opt_skip_create=$opt_skip_in=$opt_verbose=$opt_fast_insert=
$opt_lock_tables=$opt_debug=$opt_skip_delete=$opt_fast=$opt_force=0;
$opt_threads=5;
$opt_host=""; $opt_db="test";
$opt_host=$opt_user=$opt_password=""; $opt_db="test";
GetOptions("host=s","db=s","loop-count=i","skip-create","skip-in","skip-delete","verbose","fast-insert","lock-tables","debug","fast","force","threads=i") || die "Aborted";
GetOptions("host=s","db=s","user=s","password=s","loop-count=i","skip-create","skip-in","skip-delete","verbose","fast-insert","lock-tables","debug","fast","force","threads=i") || die "Aborted";
$opt_verbose=$opt_debug=$opt_lock_tables=$opt_fast_insert=$opt_fast=$opt_skip_in=$opt_force=undef; # Ignore warnings from these
print "Test of multiple connections that test the following things:\n";
......@@ -93,6 +93,7 @@ test_update() if (($pid=fork()) == 0); $work{$pid}="update";
test_flush() if (($pid=fork()) == 0); $work{$pid}= "flush";
test_check() if (($pid=fork()) == 0); $work{$pid}="check";
test_repair() if (($pid=fork()) == 0); $work{$pid}="repair";
#test_database("test2") if (($pid=fork()) == 0); $work{$pid}="check_database";
print "Started " . ($opt_threads*2+4) . " threads\n";
......@@ -331,7 +332,6 @@ sub test_check
exit(0);
}
#
# Do a repair on the first table once in a while
#
......@@ -392,6 +392,42 @@ sub test_flush
exit(0);
}
#
# Test all tables in a database
#
sub test_database
{
my ($database) = @_;
my ($dbh, $row, $i, $type, $tables);
$dbh = DBI->connect("DBI:mysql:$database:$opt_host",
$opt_user, $opt_password,
{ PrintError => 0}) || die $DBI::errstr;
$tables= join(',',$dbh->func('_ListTables'));
$type= "check";
for ($i=0 ; !test_if_abort($dbh) ; $i++)
{
sleep(120);
$sth=$dbh->prepare("$type table $tables") || die "Got error on prepare: $DBI::errstr\n";
$sth->execute || die $DBI::errstr;
while (($row=$sth->fetchrow_arrayref))
{
if ($row->[3] ne "OK")
{
print "Got error " . $row->[2] . " " . $row->[3] . " when doing $type on " . $row->[0] . "\n";
exit(1);
}
}
}
$dbh->disconnect; $dbh=0;
print "test_check: Executed $i checks\n";
exit(0);
}
#
# Help functions
#
......@@ -412,7 +448,7 @@ sub signal_abort
sub test_if_abort()
{
my ($dbh)=@_;
$row=simple_query($dbh,"select * from $abort_table");
$row=simple_query($dbh,"select * from $opt_db.$abort_table");
return (defined($row) && defined($row->[0]) != 0) ? 1 : 0;
}
......