Commit 62637c9d authored by unknown

Clean up from Bar's and Antony's code review. Found an issue with the header file where it could end up corrupted.


sql/examples/ha_archive.cc:
  Cleanup from Bar's and Antony's code review. The meta file is now system independent (so you can copy the files around as you like).
sql/examples/ha_archive.h:
  No longer bother with storing the working version of the data files. It's unimportant while there is only one format.
parent 89882d55
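
The new meta file is a fixed 19-byte record: a check byte, a version byte, two 8-byte little-endian integers (row count and a reserved checkpoint), and a dirty flag. A minimal sketch of packing that record, assuming int8store() behaves like MySQL's little-endian store macro (the standalone re-implementation and the ARCHIVE_VERSION value below are illustrative, not taken from the tree):

typedef unsigned char uchar;
typedef unsigned long long ulonglong;

/* Mimics MySQL's int8store(): least-significant byte first, always,
   which is what makes the file byte-order independent. */
static void int8store(uchar *p, ulonglong v)
{
  for (int i= 0; i < 8; i++)
    p[i]= (uchar)(v >> (8 * i));
}

#define META_BUFFER_SIZE 19
#define ARCHIVE_CHECK_HEADER 254
#define ARCHIVE_VERSION 1                 /* illustrative value */

static void pack_meta(uchar *buf, ulonglong rows, bool dirty)
{
  buf[0]= (uchar)ARCHIVE_CHECK_HEADER;    /* byte 0: corruption check  */
  buf[1]= (uchar)ARCHIVE_VERSION;         /* byte 1: format version    */
  int8store(buf + 2, rows);               /* bytes 2..9: row count     */
  int8store(buf + 10, 0ULL);              /* bytes 10..17: checkpoint, reserved */
  buf[18]= (uchar)dirty;                  /* byte 18: dirty/crash flag */
}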
sql/examples/ha_archive.cc
@@ -118,11 +118,18 @@ static HASH archive_open_tables;
 static int archive_init= 0;
 
 /* The file extension */
 #define ARZ ".ARZ"               // The data file
 #define ARN ".ARN"               // Files used during an optimize call
 #define ARM ".ARM"               // Meta file
-#define META_BUFFER_SIZE 24      // Size of the data used in the meta file
-#define CHECK_HEADER 254         // The number we use to determine corruption
+/*
+  uchar + uchar + ulonglong + ulonglong + uchar
+*/
+#define META_BUFFER_SIZE 19      // Size of the data used in the meta file
+/*
+  uchar + uchar
+*/
+#define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
+#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
 
 /*
   Used for hash table that tracks open tables.
@@ -139,19 +146,21 @@ static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length,
 */
 int ha_archive::read_data_header(gzFile file_to_read)
 {
-  int check; // We use this to check the header
+  uchar data_buffer[DATA_BUFFER_SIZE];
   DBUG_ENTER("ha_archive::read_data_header");
 
   if (gzrewind(file_to_read) == -1)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
 
-  if (gzread(file_to_read, &check, sizeof(int)) != sizeof(int))
+  if (gzread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
     DBUG_RETURN(errno ? errno : -1);
-  if (check != CHECK_HEADER)
+
+  DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
+  DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1]));
+
+  if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
+      (data_buffer[1] != (uchar)ARCHIVE_VERSION))
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-  if (gzread(file_to_read, &version, sizeof(version)) != sizeof(version))
-    DBUG_RETURN(errno ? errno : -1);
 
   DBUG_RETURN(0);
 }
@@ -161,13 +170,17 @@ int ha_archive::read_data_header(gzFile file_to_read)
 */
 int ha_archive::write_data_header(gzFile file_to_write)
 {
-  int check= CHECK_HEADER;
+  uchar data_buffer[DATA_BUFFER_SIZE];
   DBUG_ENTER("ha_archive::write_data_header");
 
-  if (gzwrite(file_to_write, &check, sizeof(int)) != sizeof(int))
-    goto error;
-  if (gzwrite(file_to_write, &version, sizeof(int)) != sizeof(version))
+  data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
+  data_buffer[1]= (uchar)ARCHIVE_VERSION;
+
+  if (gzwrite(file_to_write, data_buffer, DATA_BUFFER_SIZE) !=
+      DATA_BUFFER_SIZE)
     goto error;
+  DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
+  DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1]));
 
   DBUG_RETURN(0);
 error:
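
The data file now opens with a fixed two-byte header, a check byte followed by a version byte, written and read straight through zlib. A minimal standalone round trip of that header, assuming only zlib's gz* API (the file name, exit codes, and version value are invented for the example):

#include <zlib.h>
#include <stdio.h>

typedef unsigned char uchar;
#define DATA_BUFFER_SIZE 2
#define ARCHIVE_CHECK_HEADER 254
#define ARCHIVE_VERSION 1                 /* illustrative value */

int main(void)
{
  uchar header[DATA_BUFFER_SIZE]= { ARCHIVE_CHECK_HEADER, ARCHIVE_VERSION };

  gzFile f= gzopen("example.ARZ", "wb");  /* rows would follow the header */
  if (!f || gzwrite(f, header, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
    return 1;
  gzclose(f);

  uchar check[DATA_BUFFER_SIZE];
  f= gzopen("example.ARZ", "rb");
  if (!f || gzread(f, check, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
    return 1;
  gzclose(f);

  /* A mismatch on either byte means the file is treated as crashed. */
  printf("check=%u version=%u\n", check[0], check[1]);
  return (check[0] == ARCHIVE_CHECK_HEADER && check[1] == ARCHIVE_VERSION) ? 0 : 2;
}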
@@ -180,36 +193,30 @@ error:
 */
 int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
 {
-  size_t size= sizeof(ulonglong) + sizeof(version) + sizeof(bool); // calculate
-  byte meta_buffer[META_BUFFER_SIZE];
-  bool dirty;
-  int check;
+  uchar meta_buffer[META_BUFFER_SIZE];
   ulonglong check_point;
 
   DBUG_ENTER("ha_archive::read_meta_file");
 
-  /*
-    Format of the meta file is:
-    version
-    number of rows
-    byte showing if the file was stored
-  */
   VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
-  if (my_read(meta_file, meta_buffer, size, MYF(MY_WME | MY_NABP)))
+  if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
     DBUG_RETURN(-1);
 
   /*
     Parse out the meta data, we ignore version at the moment
   */
-  memcpy(&check, meta_buffer + sizeof(int), sizeof(int));
-  longlongstore(rows, meta_buffer + sizeof(version) + sizeof(int));
-  longlongstore(&check_point, meta_buffer + sizeof(version) + sizeof(int) +
-                sizeof(ulonglong));
-  memcpy(&dirty, meta_buffer + sizeof(ulonglong) + sizeof(version) +
-         sizeof(ulonglong) + sizeof(int), sizeof(bool));
+  *rows= uint8korr(meta_buffer + 2);
+  check_point= uint8korr(meta_buffer + 10);
 
-  if (dirty == TRUE)
-    DBUG_RETURN(-1);
+  DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));
+
+  if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
+      ((bool)meta_buffer[18] == TRUE))
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
 
   my_sync(meta_file, MYF(MY_WME));
@@ -224,26 +231,24 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
 */
 int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty)
 {
-  char meta_buffer[META_BUFFER_SIZE];
-  ulonglong check_port= 0;
-  size_t size= sizeof(ulonglong) + sizeof(version) + sizeof(bool) +
-    sizeof(ulonglong); // calculate length of data
+  uchar meta_buffer[META_BUFFER_SIZE];
+  ulonglong check_point= 0; // Reserved for the future
+
   DBUG_ENTER("ha_archive::write_meta_file");
 
-  /*
-    Format of the meta file is:
-    version
-    number of rows
-    byte showing if the file was stored
-  */
-  version= ARCHIVE_VERSION;
-  memcpy(meta_buffer, &version, sizeof(version));
-  longlongstore(meta_buffer + sizeof(version), rows); // Position past version
-  longlongstore(meta_buffer + sizeof(version) + sizeof(ulonglong), check_port);
-  memcpy(meta_buffer + sizeof(ulonglong) + sizeof(version) + sizeof(ulonglong),
-         &dirty, sizeof(bool));
+  meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
+  meta_buffer[1]= (uchar)ARCHIVE_VERSION;
+  int8store(meta_buffer + 2, rows);
+  int8store(meta_buffer + 10, check_point);
+  *(meta_buffer + 18)= (uchar)dirty;
+
+  DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", rows));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
 
   VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
-  if (my_write(meta_file, meta_buffer, size, MYF(MY_WME | MY_NABP)))
+  if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
     DBUG_RETURN(-1);
 
   my_sync(meta_file, MYF(MY_WME));
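
Since every multi-byte field is now stored least-significant byte first, a .ARM file written on one architecture parses identically on another, which is what lets you copy the files around freely. A hedged standalone reader (the file name and output format are invented for the example; offsets match write_meta_file() above):

#include <stdio.h>

typedef unsigned char uchar;
typedef unsigned long long ulonglong;

/* Mimics MySQL's uint8korr(): read 8 bytes, little-endian. */
static ulonglong uint8korr(const uchar *p)
{
  ulonglong v= 0;
  for (int i= 7; i >= 0; i--)
    v= (v << 8) | p[i];
  return v;
}

int main(void)
{
  uchar buf[19];
  FILE *f= fopen("example.ARM", "rb");
  if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf))
    return 1;
  fclose(f);

  printf("check=%u version=%u rows=%llu checkpoint=%llu dirty=%u\n",
         buf[0], buf[1], uint8korr(buf + 2), uint8korr(buf + 10), buf[18]);
  return 0;
}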
@@ -448,19 +453,12 @@ int ha_archive::close(void)
 
 /*
-  We create our data file here. The format is pretty simple. The first
-  bytes in any file are the version number. Currently we do nothing
-  with this, but in the future this gives us the ability to figure out
-  version if we change the format at all. After the version we
-  starting writing our rows. Unlike other storage engines we do not
-  "pack" our data. Since we are about to do a general compression,
-  packing would just be a waste of CPU time. If the table has blobs
-  they are written after the row in the order of creation.
-
-  So to read a row we:
-    Read the version
-    Read the record and copy it into buf
-    Loop through any blobs and read them
+  We create our data file here. The format is pretty simple.
+  You can read about the format of the data file above.
+  Unlike other storage engines we do not "pack" our data. Since we
+  are about to do a general compression, packing would just be a waste of
+  CPU time. If the table has blobs they are written after the row in the
+  order of creation.
 */
 int ha_archive::create(const char *name, TABLE *table_arg,
@@ -468,15 +466,9 @@ int ha_archive::create(const char *name, TABLE *table_arg,
 {
   File create_file;  // We use to create the datafile and the metafile
   char name_buff[FN_REFLEN];
-  size_t written;
   int error;
   DBUG_ENTER("ha_archive::create");
 
-  /*
-    Right now version for the meta file and the data file is the same.
-  */
-  version= ARCHIVE_VERSION;
-
   if ((create_file= my_create(fn_format(name_buff,name,"",ARM,
                               MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
                               O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
@@ -538,10 +530,11 @@ int ha_archive::write_row(byte * buf)
   DBUG_ENTER("ha_archive::write_row");
 
   statistic_increment(ha_write_count,&LOCK_status);
-  if (table->timestamp_default_now)
-    update_timestamp(buf+table->timestamp_default_now-1);
+  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+    table->timestamp_field->set_time();
   pthread_mutex_lock(&share->mutex);
   written= gzwrite(share->archive_write, buf, table->reclength);
+  DBUG_PRINT("ha_archive::write_row", ("Wrote %d bytes expected %d", written, table->reclength));
   share->dirty= TRUE;
   if (written != table->reclength)
     goto error;
@@ -625,7 +618,7 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
   DBUG_ENTER("ha_archive::get_row");
 
   read= gzread(file_to_read, buf, table->reclength);
-  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes", read));
+  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->reclength));
 
   if (read == Z_STREAM_ERROR)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -843,7 +836,8 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
                                        THR_LOCK_DATA **to,
                                        enum thr_lock_type lock_type)
 {
-  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
-
+  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+  {
     /*
       Here is where we get into the guts of a row level lock.
       If TL_UNLOCK is set
@@ -853,10 +847,8 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
     if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
          lock_type <= TL_WRITE) && !thd->in_lock_tables
-        && !thd->tablespace_op) {
+        && !thd->tablespace_op)
       lock_type = TL_WRITE_ALLOW_WRITE;
-    }
 
     /*
       In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
@@ -866,9 +858,8 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
       concurrent inserts to t2.
     */
 
-    if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) {
+    if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
       lock_type = TL_READ;
-    }
 
     lock.type=lock_type;
   }
sql/examples/ha_archive.h
@@ -52,7 +52,6 @@ class ha_archive: public handler
   z_off_t current_position;  /* The position of the row we just read */
   byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
   String buffer;             /* Buffer used for blob storage */
-  uint version;              /* Used for recording version */
   ulonglong scan_rows;       /* Number of rows left in scan */
 
 public: