Commit 72e7d696 authored by brian@avenger.(none)


Added tests for archive. Cleaned up a merge mistake and added some information on how well archive compresses. 
parent fe83735a
-- require r/have_archive.require
disable_query_log;
show variables like "have_archive";
enable_query_log;
r/have_archive.require (the output the include above must match):
Variable_name Value
have_archive YES
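(mysqltest's --require runs the next query and compares its output against the named file; on a mismatch -- here, when have_archive is not YES -- the test is skipped rather than failed, so the archive tests only run in builds that compile the engine in.)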
ha_archive.cc:
@@ -53,6 +53,18 @@
 to be any faster. For writes it is always a bit slower than MyISAM. It has no
 internal limits though for row length.
+Examples comparing MyISAM and Archive:
+Table with 76695844 identical rows:
+   29680807  a_archive.ARZ
+  920350317  a.MYD
+Table with 8991478 rows (all of Slashdot's comments):
+ 1922964506  comment_archive.ARZ
+ 2944970297  comment_text.MYD
 TODO:
  Add bzip optional support.
  Allow users to set compression level.
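For scale: the identical-row table compresses from 920350317 bytes to 29680807, roughly 31:1, since gzip nearly eliminates the repetition; the real-world Slashdot comment text only goes from 2944970297 to 1922964506 bytes, about 1.5:1, a 35% saving.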
...@@ -225,7 +237,7 @@ int ha_archive::close(void) ...@@ -225,7 +237,7 @@ int ha_archive::close(void)
if (gzclose(archive) == Z_ERRNO) if (gzclose(archive) == Z_ERRNO)
rc =-1; rc =-1;
rc |= free_share(share); rc |= free_share(share);
DBUG_RETURN(); DBUG_RETURN(rc);
} }
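The close() fix stops the handler from discarding its error status. A minimal self-contained sketch of the corrected pattern against bare zlib (archive_close is a hypothetical stand-in for ha_archive::close; gzclose and Z_ERRNO are the real zlib names, and the free_share step is elided):

    #include <zlib.h>

    /* Hypothetical stand-in for ha_archive::close(): fold the gzclose
       status into rc and return it instead of returning nothing. */
    int archive_close(gzFile archive)
    {
      int rc= 0;
      if (gzclose(archive) == Z_ERRNO)   /* flushes and closes the .ARZ */
        rc= -1;
      return rc;                         /* previously this value was dropped */
    }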
@@ -276,7 +288,7 @@ int ha_archive::write_row(byte * buf)
   statistic_increment(ha_write_count,&LOCK_status);
   if (table->timestamp_default_now)
-    update_timestamp(record+table->timestamp_default_now-1);
+    update_timestamp(buf+table->timestamp_default_now-1);
   written = gzwrite(share->archive_write, buf, table->reclength);
   share->dirty= true;
   if (written == 0 || written != table->reclength)
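The write_row() change makes the auto-set timestamp land in buf, the row image that gzwrite actually persists, rather than the unrelated record buffer. A sketch of the same write path, assuming bare zlib and a hypothetical fixed-width Row in place of table->reclength:

    #include <zlib.h>
    #include <cstring>
    #include <ctime>

    struct Row { char data[64]; };        /* stands in for a fixed-length row */

    int write_row(gzFile archive_write, Row *buf, size_t ts_offset)
    {
      /* Stamp the buffer that is about to be written -- the bug put the
         timestamp in a different buffer than the one handed to gzwrite. */
      time_t now= time(nullptr);
      memcpy(buf->data + ts_offset, &now, sizeof(now));

      int written= gzwrite(archive_write, buf, sizeof(*buf));
      if (written == 0 || written != (int) sizeof(*buf))
        return -1;                        /* short or failed compressed write */
      return 0;
    }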
@@ -335,7 +347,7 @@ int ha_archive::rnd_init(bool scan)
     intact.
   */
   read= gzread(archive, &version, sizeof(version));
-  if (written == 0 || written != sizeof(version))
+  if (read == 0 || read != sizeof(version))
     DBUG_RETURN(-1);
   records = 0;
   DBUG_RETURN(0);
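The rnd_init() fix tests the value gzread just returned (read) rather than the leftover written from the write path, so a truncated header is now actually detected. A sketch of the corrected check (read_version and the single version byte are illustrative; gzread returns the byte count, 0 at end of file, or -1 on error):

    #include <zlib.h>

    int read_version(gzFile archive)
    {
      char version;
      int read= gzread(archive, &version, sizeof(version));
      if (read == 0 || read != (int) sizeof(version))
        return -1;      /* header missing or unreadable: abort the scan */
      return 0;         /* version byte consumed; rows follow */
    }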
...
@@ -158,7 +158,7 @@ enum db_type
   DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM,
   DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB,
   DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER,
-  DB_TYPE_EXAMPLE_DB,
+  DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB,
   DB_TYPE_DEFAULT // Must be last
 };
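DB_TYPE_ARCHIVE_DB follows the existing convention: new engine tags are inserted before the DB_TYPE_DEFAULT sentinel, which the comment requires to stay last. A reduced sketch of the pattern (the starting value is illustrative, not MySQL's real numbering):

    enum db_type
    {
      DB_TYPE_MYISAM= 1,       /* illustrative start value */
      DB_TYPE_EXAMPLE_DB,
      DB_TYPE_ARCHIVE_DB,      /* new tags slot in here */
      DB_TYPE_DEFAULT          /* sentinel: must be last */
    };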
...
@@ -639,6 +639,7 @@ struct show_var_st init_vars[]= {
   {"ft_query_expansion_limit",(char*) &ft_query_expansion_limit, SHOW_LONG},
   {"ft_stopword_file", (char*) &ft_stopword_file, SHOW_CHAR_PTR},
   {sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS},
+  {"have_archive", (char*) &have_archive_db, SHOW_HAVE},
   {"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
   {"have_compress", (char*) &have_compress, SHOW_HAVE},
   {"have_crypt", (char*) &have_crypt, SHOW_HAVE},
...