Commit 4235378e authored by unknown

Merge baker@bk-internal.mysql.com:/home/bk/mysql-5.0

into  zim.(none):/home/brian/mysql/mysql-5.0

parents e0da8f43 d30bfd32
@@ -396,6 +396,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
     share->table_name_length= length;
     share->table_name= tmp_name;
     share->crashed= FALSE;
+    share->archive_write_open= FALSE;
     fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
     fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
     strmov(share->table_name,table_name);
@@ -413,16 +414,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
     */
     if (read_meta_file(share->meta_file, &share->rows_recorded))
       share->crashed= TRUE;
-    else
-      (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
-    /*
-      It is expensive to open and close the data files and since you can't have
-      a gzip file that can be both read and written we keep a writer open
-      that is shared amoung all open tables.
-    */
-    if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
-      share->crashed= TRUE;
     VOID(my_hash_insert(&archive_open_tables, (byte*) share));
     thr_lock_init(&share->lock);
   }
@@ -460,6 +452,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
       (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
     else
       (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE);
+    if (share->archive_write_open)
      if (gzclose(share->archive_write) == Z_ERRNO)
        rc= 1;
     if (my_close(share->meta_file, MYF(0)))
@@ -471,6 +464,26 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
   DBUG_RETURN(rc);
 }
 
+int ha_archive::init_archive_writer()
+{
+  DBUG_ENTER("ha_archive::init_archive_writer");
+  (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+
+  /*
+    It is expensive to open and close the data files and since you can't have
+    a gzip file that can be both read and written we keep a writer open
+    that is shared amoung all open tables.
+  */
+  if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
+  {
+    share->crashed= TRUE;
+    DBUG_RETURN(1);
+  }
+  share->archive_write_open= TRUE;
+
+  DBUG_RETURN(0);
+}
+
 /*
   We just implement one additional file extension.
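The new ha_archive::init_archive_writer() moves the gzopen() call out of get_share(): the shared append handle is now created only when a writer is actually needed, and a failed open still marks the share as crashed. Below is a minimal standalone sketch of the same lazy-open pattern using plain zlib and pthreads; the names example_share and ensure_writer are illustrative only, not part of the engine.

/*
  Standalone sketch of the lazy-writer pattern (illustrative names):
  open the shared gzip append handle on demand and mark the share
  crashed if the open fails.
*/
#include <pthread.h>
#include <zlib.h>

struct example_share
{
  pthread_mutex_t mutex;
  gzFile writer;            /* shared append handle, opened on demand */
  bool writer_open;
  bool crashed;
  char data_file_name[512];
};

/* Caller is expected to hold share->mutex, as write_row() does below. */
static int ensure_writer(example_share *share)
{
  if (share->writer_open)
    return 0;
  /* "ab" appends compressed data; a gzip stream cannot be open for both
     reading and writing, hence one writer shared by all open handlers. */
  if ((share->writer= gzopen(share->data_file_name, "ab")) == NULL)
  {
    share->crashed= true;
    return 1;
  }
  share->writer_open= true;
  return 0;
}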
@@ -693,6 +706,10 @@ int ha_archive::write_row(byte *buf)
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
   pthread_mutex_lock(&share->mutex);
+  if (!share->archive_write_open)
+    if (init_archive_writer())
+      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
   share->rows_recorded++;
   rc= real_write_row(buf, share->archive_write);
   pthread_mutex_unlock(&share->mutex);
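write_row() now performs the lazy open on the first insert, and the flag is checked while share->mutex is already held, so two concurrent first writes cannot both call gzopen(); a failed open surfaces as HA_ERR_CRASHED_ON_USAGE. A hedged sketch of the same call pattern, reusing the illustrative ensure_writer() above and standing in gzwrite() for the engine's real_write_row():

/* Sketch of the write path: check-and-open happens only under the mutex,
   so concurrent first INSERTs cannot race on gzopen(). */
static int example_write_row(example_share *share, const void *buf, unsigned len)
{
  int rc= 0;
  pthread_mutex_lock(&share->mutex);
  if (ensure_writer(share))
  {
    pthread_mutex_unlock(&share->mutex);
    return 1;                 /* the engine maps this to HA_ERR_CRASHED_ON_USAGE */
  }
  if (gzwrite(share->writer, buf, len) != (int) len)
    rc= 1;
  pthread_mutex_unlock(&share->mutex);
  return rc;
}

Note that the sketch releases the mutex on the failure path before returning.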
@@ -893,6 +910,10 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
   gzFile writer;
   char writer_filename[FN_REFLEN];
 
+  /* Open up the writer if we haven't yet */
+  if (!share->archive_write_open)
+    init_archive_writer();
+
   /* Flush any waiting data */
   gzflush(share->archive_write, Z_SYNC_FLUSH);
......
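optimize() likewise opens the writer if no row has been written yet, then gzflush(Z_SYNC_FLUSH) pushes any buffered compressed data into the data file before the table is rebuilt, and free_share() above only calls gzclose() when the handle was really opened. A sketch of that flush-and-close sequence with the same illustrative names:

/* Sketch of the flush-and-close sequence mirrored by optimize() and
   free_share(): Z_SYNC_FLUSH makes buffered rows visible to readers of
   the same file, and gzclose() runs only if the handle was opened. */
static int example_flush_and_close(example_share *share)
{
  int rc= 0;
  pthread_mutex_lock(&share->mutex);
  if (share->writer_open)
  {
    gzflush(share->writer, Z_SYNC_FLUSH);
    if (gzclose(share->writer) == Z_ERRNO)
      rc= 1;
    share->writer_open= false;
  }
  pthread_mutex_unlock(&share->mutex);
  return rc;
}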
@@ -34,6 +34,7 @@ typedef struct st_archive_share {
   THR_LOCK lock;
   File meta_file;           /* Meta file we use */
   gzFile archive_write;     /* Archive file we are working with */
+  bool archive_write_open;
   bool dirty;               /* Flag for if a flush should occur */
   bool crashed;             /* Meta file is crashed */
   ha_rows rows_recorded;    /* Number of rows in tables */
@@ -87,6 +88,7 @@ public:
   int write_meta_file(File meta_file, ha_rows rows, bool dirty);
   ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
   int free_share(ARCHIVE_SHARE *share);
+  int init_archive_writer();
   bool auto_repair() const { return 1; } // For the moment we just do this
   int read_data_header(gzFile file_to_read);
   int write_data_header(gzFile file_to_write);
......