Commit 9879f436 authored by brian@zim.(none)'s avatar brian@zim.(none)

New azio which keeps meta data in its own header.

parent f85acb89
......@@ -31,7 +31,7 @@ LDADD =
DEFS = @DEFS@
noinst_HEADERS = ha_archive.h azlib.h
noinst_PROGRAMS = archive_test
noinst_PROGRAMS = archive_test archive_reader
EXTRA_LTLIBRARIES = ha_archive.la
pkglib_LTLIBRARIES = @plugin_archive_shared_target@
......@@ -56,6 +56,14 @@ archive_test_LDADD = $(top_builddir)/mysys/libmysys.a \
@ZLIB_LIBS@
archive_test_LDFLAGS = @NOINST_LDFLAGS@
archive_reader_SOURCES = archive_reader.c azio.c
archive_reader_CFLAGS = $(AM_CFLAGS)
archive_reader_LDADD = $(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/strings/libmystrings.a \
@ZLIB_LIBS@
archive_reader_LDFLAGS = @NOINST_LDFLAGS@
EXTRA_DIST = CMakeLists.txt plug.in
# Don't update the files from bitkeeper
......
#include "azlib.h"
#include <string.h>
#include <assert.h>
#include <stdio.h>
#define BUFFER_LEN 1024
/*
  archive_reader: print the meta data kept in the header of an az
  (archive engine) file.

  Usage: archive_reader <file>

  Returns 0 on success, 1 on usage or open error.
*/
int main(int argc, char *argv[])
{
  unsigned int ret;
  azio_stream reader_handle;

  MY_INIT(argv[0]);

  if (argc < 2)
  {
    printf("No file specified. \n");
    return 1;                       /* was 0: usage error must not exit clean */
  }

  if (!(ret= azopen(&reader_handle, argv[1], O_RDONLY|O_BINARY)))
  {
    /* Fixed copy/paste message: this tool opens for read, it creates nothing */
    printf("Could not open file '%s' for reading\n", argv[1]);
    return 1;                       /* was 0: open failure must not exit clean */
  }

  /* All of these fields are populated from the az header by azopen() */
  printf("Version :%u\n", reader_handle.version);
  printf("Start position :%llu\n", (unsigned long long)reader_handle.start);
  printf("Block size :%u\n", reader_handle.block_size);
  printf("Rows: %llu\n", reader_handle.rows);
  printf("Autoincrement: %llu\n", reader_handle.auto_increment);
  printf("Check Point: %llu\n", reader_handle.check_point);
  printf("Forced Flushes: %llu\n", reader_handle.forced_flushes);
  printf("State: %s\n", ( reader_handle.dirty ? "dirty" : "clean"));

  azclose(&reader_handle);

  return 0;
}
......@@ -5,88 +5,192 @@
#define TEST_FILENAME "test.az"
#define TEST_STRING "YOU don't know about me without you have read a book by the name of The Adventures of Tom Sawyer; but that ain't no matter. That book was made by Mr. Mark Twain, and he told the truth, mainly. There was things which he stretched, but mainly he told the truth. That is nothing. I never seen anybody but lied one time or another, without it was Aunt Polly, or the widow, or maybe Mary. Aunt Polly--Tom's Aunt Polly, she is--and Mary, and the Widow Douglas is all told about in that book, which is mostly a true book, with some stretchers, as I said before. Now the way that the book winds up is this: Tom and me found the money that the robbers hid in the cave, and it made us rich. We got six thousand dollars apiece--all gold. It was an awful sight of money when it was piled up. Well, Judge Thatcher he took it and put it out at interest, and it fetched us a dollar a day apiece all the year round --more than a body could tell what to do with. The Widow Douglas she took me for her son, and allowed she would..."
#define TEST_LOOP_NUM 100
#define BUFFER_LEN 1024
#define TWOGIG 2147483648
#define FOURGIG 4294967296
#define EIGHTGIG 8589934592
int main(int argc __attribute__((unused)), char *argv[])
/* prototypes */
int size_test(unsigned long long length, unsigned long long rows_to_test_for);
int main(int argc, char *argv[])
{
unsigned long ret;
unsigned int ret;
int error;
unsigned int x;
int written_rows= 0;
azio_stream writer_handle, reader_handle;
char buffer[BUFFER_LEN];
unsigned long write_length;
unsigned long read_length= 0;
unlink(TEST_FILENAME);
if (argc > 1)
return 0;
MY_INIT(argv[0]);
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_RDWR|O_BINARY)))
{
printf("Could not create test file\n");
return 0;
}
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
azflush(&writer_handle, Z_FINISH);
if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY)))
{
printf("Could not open test file\n");
return 0;
}
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN);
assert(reader_handle.rows == 0);
assert(reader_handle.auto_increment == 0);
assert(reader_handle.check_point == 0);
assert(reader_handle.forced_flushes == 0);
assert(reader_handle.dirty == 1);
for (x= 0; x < TEST_LOOP_NUM; x++)
{
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
written_rows++;
}
azflush(&writer_handle, Z_SYNC_FLUSH);
/* Lets test that our internal stats are good */
assert(writer_handle.rows == TEST_LOOP_NUM);
/* Reader needs to be flushed to make sure it is up to date */
azflush(&reader_handle, Z_SYNC_FLUSH);
assert(reader_handle.rows == TEST_LOOP_NUM);
assert(reader_handle.auto_increment == 0);
assert(reader_handle.check_point == 0);
assert(reader_handle.forced_flushes == 1);
assert(reader_handle.dirty == 1);
writer_handle.auto_increment= 4;
azflush(&writer_handle, Z_SYNC_FLUSH);
assert(writer_handle.rows == TEST_LOOP_NUM);
assert(writer_handle.auto_increment == 4);
assert(writer_handle.check_point == 0);
assert(writer_handle.forced_flushes == 2);
assert(writer_handle.dirty == 1);
if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY)))
{
printf("Could not open test file\n");
return 0;
}
/* Read the original data */
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(!error);
assert(ret == BUFFER_LEN);
assert(!memcmp(buffer, TEST_STRING, ret));
}
assert(writer_handle.rows == TEST_LOOP_NUM);
/* Test here for falling off the planet */
/* Final Write before closing */
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
/* We don't use FINISH, but I want to have it tested */
azflush(&writer_handle, Z_FINISH);
assert(writer_handle.rows == TEST_LOOP_NUM+1);
/* Read final write */
azrewind(&reader_handle);
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(ret == BUFFER_LEN);
assert(!error);
assert(!memcmp(buffer, TEST_STRING, ret));
}
azclose(&writer_handle);
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_APPEND|O_WRONLY|O_BINARY)))
/* Rewind and full test */
azrewind(&reader_handle);
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(ret == BUFFER_LEN);
assert(!error);
assert(!memcmp(buffer, TEST_STRING, ret));
}
printf("Finished reading\n");
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_RDWR|O_BINARY)))
{
printf("Could not open file (%s) for appending\n", TEST_FILENAME);
return 0;
}
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
azflush(&writer_handle, Z_FINISH);
azflush(&writer_handle, Z_SYNC_FLUSH);
/* Read the original data */
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN);
assert(ret == BUFFER_LEN);
assert(!error);
/* Read the new data */
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN);
assert(ret == BUFFER_LEN);
assert(!error);
/* Rewind and full test */
azrewind(&reader_handle);
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(!error);
assert(ret == BUFFER_LEN);
assert(!memcmp(buffer, TEST_STRING, ret));
}
azclose(&writer_handle);
azclose(&reader_handle);
unlink(TEST_FILENAME);
/* Start size tests */
printf("About to run 2gig and 4gig test now, you may want to hit CTRL-C\n");
printf("About to run 2/4/8 gig tests now, you may want to hit CTRL-C\n");
size_test(TWOGIG, 2097152);
size_test(FOURGIG, 4194304);
size_test(EIGHTGIG, 8388608);
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
return 0;
}
int size_test(unsigned long long length, unsigned long long rows_to_test_for)
{
azio_stream writer_handle, reader_handle;
unsigned long long write_length;
unsigned long long read_length= 0;
unsigned int ret;
char buffer[BUFFER_LEN];
int error;
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_RDWR|O_TRUNC|O_BINARY)))
{
printf("Could not create test file\n");
return 0;
}
for (write_length= 0; write_length < TWOGIG ; write_length+= ret)
for (write_length= 0; write_length < length ; write_length+= ret)
{
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(!error);
if (ret != BUFFER_LEN)
{
printf("Size %lu\n", ret);
printf("Size %u\n", ret);
assert(ret != BUFFER_LEN);
}
if ((write_length % 14031) == 0)
{
azflush(&writer_handle, Z_SYNC_FLUSH);
}
}
assert(write_length == TWOGIG);
printf("Read %lu bytes, expected %lu\n", write_length, TWOGIG);
azflush(&writer_handle, Z_FINISH);
assert(write_length == length);
azflush(&writer_handle, Z_SYNC_FLUSH);
printf("Reading back data\n");
......@@ -102,29 +206,13 @@ int main(int argc __attribute__((unused)), char *argv[])
assert(!memcmp(buffer, TEST_STRING, ret));
if (ret != BUFFER_LEN)
{
printf("Size %lu\n", ret);
printf("Size %u\n", ret);
assert(ret != BUFFER_LEN);
}
}
assert(read_length == TWOGIG);
azclose(&writer_handle);
azclose(&reader_handle);
unlink(TEST_FILENAME);
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
{
printf("Could not create test file\n");
return 0;
}
for (write_length= 0; write_length < FOURGIG ; write_length+= ret)
{
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
}
assert(write_length == FOURGIG);
printf("Read %lu bytes, expected %lu\n", write_length, FOURGIG);
assert(read_length == length);
assert(writer_handle.rows == rows_to_test_for);
azclose(&writer_handle);
azclose(&reader_handle);
unlink(TEST_FILENAME);
......
......@@ -7,7 +7,6 @@
* Copyright (C) 1995-2005 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Compile this file with -DNO_GZCOMPRESS to avoid the compression code.
*/
/* @(#) $Id$ */
......@@ -32,9 +31,11 @@ int az_open(azio_stream *s, const char *path, int Flags, File fd);
int do_flush(azio_stream *file, int flush);
int get_byte(azio_stream *s);
void check_header(azio_stream *s);
void write_header(azio_stream *s);
int destroy(azio_stream *s);
void putLong(File file, uLong x);
uLong getLong(azio_stream *s);
void read_header(azio_stream *s, unsigned char *buffer);
/* ===========================================================================
Opens a gzip (.gz) file for reading or writing. The mode parameter
......@@ -48,14 +49,14 @@ uLong getLong(azio_stream *s);
int az_open (azio_stream *s, const char *path, int Flags, File fd)
{
int err;
int level = Z_DEFAULT_COMPRESSION; /* compression level */
int level = Z_NO_COMPRESSION; /* Z_DEFAULT_COMPRESSION;*/ /* compression level */
int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */
s->stream.zalloc = (alloc_func)0;
s->stream.zfree = (free_func)0;
s->stream.opaque = (voidpf)0;
memset(s->inbuf, 0, Z_BUFSIZE);
memset(s->outbuf, 0, Z_BUFSIZE);
memset(s->inbuf, 0, AZ_BUFSIZE);
memset(s->outbuf, 0, AZ_BUFSIZE);
s->stream.next_in = s->inbuf;
s->stream.next_out = s->outbuf;
s->stream.avail_in = s->stream.avail_out = 0;
......@@ -69,19 +70,23 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
s->mode = 'r';
s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */
if (Flags & O_WRONLY || Flags & O_APPEND)
/*
We do our own version of append by nature.
We must always have write access to take card of the header.
*/
DBUG_ASSERT(Flags | O_APPEND);
DBUG_ASSERT(Flags | O_WRONLY);
if (Flags & O_RDWR)
s->mode = 'w';
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
err = Z_STREAM_ERROR;
#else
if (s->mode == 'w')
{
err = deflateInit2(&(s->stream), level,
Z_DEFLATED, -MAX_WBITS, 8, strategy);
/* windowBits is passed < 0 to suppress zlib header */
s->stream.next_out = s->outbuf;
#endif
if (err != Z_OK)
{
destroy(s);
......@@ -103,7 +108,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
return Z_NULL;
}
}
s->stream.avail_out = Z_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE;
errno = 0;
s->file = fd < 0 ? my_open(path, Flags, MYF(0)) : fd;
......@@ -113,39 +118,64 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
destroy(s);
return Z_NULL;
}
if (s->mode == 'w') {
char buffer[AZHEADER_SIZE];
char *ptr;
/* Write a very simple .gz header: */
bzero(buffer, AZHEADER_SIZE);
buffer[0] = az_magic[0];
buffer[1] = az_magic[1];
buffer[2] = (unsigned char)0; /* Reserved for block size */
buffer[3] = (unsigned char)0; /* Compression Type */
ptr= buffer + 4;
int4store(ptr, 0LL); /* FRM Block */
ptr+= sizeof(unsigned long);
int4store(ptr, 0LL); /* Meta Block */
ptr+= sizeof(unsigned long);
int4store(ptr, (unsigned long)AZHEADER_SIZE); /* Start of Data Block Index Block */
ptr+= sizeof(unsigned long);
s->start = AZHEADER_SIZE;
s->version = (unsigned char)az_magic[1];
my_write(s->file, buffer, (uint)s->start, MYF(0));
/* We use 10L instead of ftell(s->file) to because ftell causes an
* fflush on some systems. This version of the library doesn't use
* start anyway in write mode, so this initialization is not
* necessary.
*/
} else {
check_header(s); /* skip the .gz header */
s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in;
if (Flags & O_CREAT || Flags & O_TRUNC)
{
s->rows= 0;
s->forced_flushes= 0;
s->auto_increment= 0;
s->check_point= 0;
s->dirty= 1; /* We create the file dirty */
write_header(s);
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
}
else if (s->mode == 'w')
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0));
read_header(s, buffer); /* skip the .az header */
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
}
else
{
check_header(s); /* skip the .az header */
}
return 1;
}
/*
  Serializes the az file header plus the meta block (rows, forced flush
  count, check point, auto_increment and the dirty flag) and writes it at
  offset 0 of the file.

  Side effects: resets s->start, s->block_size and s->version, since the
  data section always begins immediately after the fixed-size header.
*/
void write_header(azio_stream *s)
{
  char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
  char *ptr= buffer;

  /* Data begins right after the header + meta block */
  s->start = AZHEADER_SIZE + AZMETA_BUFFER_SIZE;
  s->block_size= AZ_BUFSIZE;
  s->version = (unsigned char)az_magic[1];

  /* Write a very simple .az header: */
  bzero(buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE);
  *(ptr + AZ_MAGIC_POS)= az_magic[0];
  *(ptr + AZ_VERSION_POS)= (unsigned char)s->version;
  *(ptr + AZ_BLOCK_POS)= (unsigned char)(s->block_size/1024); /* Block size, stored in KB */
  *(ptr + AZ_STRATEGY_POS)= (unsigned char)Z_DEFAULT_STRATEGY; /* Compression strategy */
  int4store(ptr + AZ_FRM_POS, 0);             /* FRM block (reserved, unused) */
  int4store(ptr + AZ_META_POS, 0);            /* Meta block (reserved, unused) */
  int8store(ptr + AZ_START_POS, (unsigned long long)s->start); /* Offset of first data block */
  int8store(ptr + AZ_ROW_POS, (unsigned long long)s->rows);    /* Row count */
  int8store(ptr + AZ_FLUSH_POS, (unsigned long long)s->forced_flushes); /* Forced flush count */
  int8store(ptr + AZ_CHECK_POS, (unsigned long long)s->check_point);    /* Last check point */
  int8store(ptr + AZ_AUTOINCREMENT_POS, (unsigned long long)s->auto_increment); /* Auto increment value */
  *(ptr + AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Dirty flag */

  /*
    Removed a leftover debug printf("ROWS ...") that polluted stdout on
    every flush from library code.
    Always begin at the begining, and end there as well.
  */
  my_pwrite(s->file, buffer, (uint)s->start, 0, MYF(0));
}
/* ===========================================================================
Opens a gzip (.gz) file for reading or writing.
*/
......@@ -177,7 +207,7 @@ int get_byte(s)
if (s->stream.avail_in == 0)
{
errno = 0;
s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0));
s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
......@@ -213,7 +243,7 @@ void check_header(azio_stream *s)
if (len < 2) {
if (len) s->inbuf[0] = s->stream.next_in[0];
errno = 0;
len = (uInt)my_read(s->file, (byte *)s->inbuf + len, Z_BUFSIZE >> len, MYF(0));
len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE >> len, MYF(0));
if (len == 0) s->z_err = Z_ERRNO;
s->stream.avail_in += len;
s->stream.next_in = s->inbuf;
......@@ -257,18 +287,41 @@ void check_header(azio_stream *s)
for (len = 0; len < 2; len++) (void)get_byte(s);
}
s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in;
}
else if ( s->stream.next_in[0] == az_magic[0] && s->stream.next_in[1] == az_magic[1])
{
s->stream.avail_in -= 2;
s->stream.next_in += 2;
for (len = 0; len < (AZHEADER_SIZE-2); len++) (void)get_byte(s);
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
for (len = 0; len < (AZHEADER_SIZE + AZMETA_BUFFER_SIZE); len++)
buffer[len]= get_byte(s);
s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
read_header(s, buffer);
}
else
{
s->z_err = Z_OK;
return;
}
}
/*
  Populates the stream's meta data fields (version, block size, data start
  offset, row count, check point, forced flushes, auto_increment, dirty
  flag) from a raw header buffer that was previously read from disk.

  A buffer that does not begin with the az magic bytes marks the stream
  transparent (pass-through, not an az file).
*/
void read_header(azio_stream *s, unsigned char *buffer)
{
  /* Guard clause: anything without the az magic is handled as transparent */
  if (buffer[0] != az_magic[0] || buffer[1] != az_magic[1])
  {
    s->transparent = 1;
    s->version = (unsigned char)0;
    /* Deliberately fires in debug builds when a non-az header shows up */
    DBUG_ASSERT(buffer[0] == az_magic[0] && buffer[1] == az_magic[1]);
    return;
  }

  s->version= (unsigned int)buffer[AZ_VERSION_POS];
  s->block_size= 1024 * buffer[AZ_BLOCK_POS];
  s->start= (unsigned long long)uint8korr(buffer + AZ_START_POS);
  s->rows= (unsigned long long)uint8korr(buffer + AZ_ROW_POS);
  s->check_point= (unsigned long long)uint8korr(buffer + AZ_CHECK_POS);
  s->forced_flushes= (unsigned long long)uint8korr(buffer + AZ_FLUSH_POS);
  s->auto_increment= (unsigned long long)uint8korr(buffer + AZ_AUTOINCREMENT_POS);
  s->dirty= (unsigned int)buffer[AZ_DIRTY_POS];
}
......@@ -284,11 +337,7 @@ int destroy (s)
if (s->stream.state != NULL) {
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
err = Z_STREAM_ERROR;
#else
err = deflateEnd(&(s->stream));
#endif
}
else if (s->mode == 'r')
{
......@@ -311,7 +360,7 @@ int destroy (s)
Reads the given number of uncompressed bytes from the compressed file.
azread returns the number of bytes actually read (0 for end of file).
*/
unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int *error)
unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int *error)
{
Bytef *start = (Bytef*)buf; /* starting point for crc computation */
Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */
......@@ -383,7 +432,7 @@ unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int
if (s->stream.avail_in == 0 && !s->z_eof) {
errno = 0;
s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0));
s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
......@@ -410,7 +459,8 @@ unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int
* Check for such files:
*/
check_header(s);
if (s->z_err == Z_OK) {
if (s->z_err == Z_OK)
{
inflateReset(&(s->stream));
s->crc = crc32(0L, Z_NULL, 0);
}
......@@ -432,29 +482,30 @@ unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int
}
#ifndef NO_GZCOMPRESS
/* ===========================================================================
Writes the given number of uncompressed bytes into the compressed file.
azwrite returns the number of bytes actually written (0 in case of error).
*/
unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len)
unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len)
{
s->stream.next_in = (Bytef*)buf;
s->stream.avail_in = len;
s->rows++;
while (s->stream.avail_in != 0)
{
if (s->stream.avail_out == 0)
{
s->stream.next_out = s->outbuf;
if (my_write(s->file, (byte *)s->outbuf, Z_BUFSIZE, MYF(0)) != Z_BUFSIZE)
if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE, MYF(0)) != AZ_BUFSIZE)
{
s->z_err = Z_ERRNO;
break;
}
s->stream.avail_out = Z_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE;
}
s->in += s->stream.avail_in;
s->out += s->stream.avail_out;
......@@ -465,19 +516,15 @@ unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len)
}
s->crc = crc32(s->crc, (const Bytef *)buf, len);
return (unsigned long)(len - s->stream.avail_in);
return (unsigned int)(len - s->stream.avail_in);
}
#endif
/* ===========================================================================
Flushes all pending output into the compressed file. The parameter
flush is as in the deflate() function.
*/
int do_flush (s, flush)
azio_stream *s;
int flush;
int do_flush (azio_stream *s, int flush)
{
uInt len;
int done = 0;
......@@ -486,8 +533,9 @@ int do_flush (s, flush)
s->stream.avail_in = 0; /* should be zero already anyway */
for (;;) {
len = Z_BUFSIZE - s->stream.avail_out;
for (;;)
{
len = AZ_BUFSIZE - s->stream.avail_out;
if (len != 0) {
if ((uInt)my_write(s->file, (byte *)s->outbuf, len, MYF(0)) != len)
......@@ -496,7 +544,7 @@ int do_flush (s, flush)
return Z_ERRNO;
}
s->stream.next_out = s->outbuf;
s->stream.avail_out = Z_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE;
}
if (done) break;
s->out += s->stream.avail_out;
......@@ -513,6 +561,11 @@ int do_flush (s, flush)
if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break;
}
if (flush == Z_FINISH)
s->dirty= 0; /* Mark it clean, we should be good now */
write_header(s);
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
......@@ -520,11 +573,25 @@ int ZEXPORT azflush (s, flush)
azio_stream *s;
int flush;
{
int err = do_flush (s, flush);
int err;
if (err) return err;
my_sync(s->file, MYF(0));
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
if (s->mode == 'r')
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0));
read_header(s, buffer); /* skip the .az header */
return Z_OK;
}
else
{
s->forced_flushes++;
err= do_flush(s, flush);
if (err) return err;
my_sync(s->file, MYF(0));
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
}
/* ===========================================================================
......@@ -566,19 +633,17 @@ my_off_t azseek (s, offset, whence)
return -1L;
}
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
return -1L;
#else
if (whence == SEEK_SET) {
if (s->mode == 'w')
{
if (whence == SEEK_SET)
offset -= s->in;
}
/* At this point, offset is the number of zero bytes to write. */
/* There was a zmemzero here if inbuf was null -Brian */
while (offset > 0) {
uInt size = Z_BUFSIZE;
if (offset < Z_BUFSIZE) size = (uInt)offset;
while (offset > 0)
{
uInt size = AZ_BUFSIZE;
if (offset < AZ_BUFSIZE) size = (uInt)offset;
size = azwrite(s, s->inbuf, size);
if (size == 0) return -1L;
......@@ -586,7 +651,6 @@ my_off_t azseek (s, offset, whence)
offset -= size;
}
return s->in;
#endif
}
/* Rest of function is for reading only */
......@@ -622,8 +686,8 @@ my_off_t azseek (s, offset, whence)
}
while (offset > 0) {
int error;
unsigned long size = Z_BUFSIZE;
if (offset < Z_BUFSIZE) size = (int)offset;
unsigned int size = AZ_BUFSIZE;
if (offset < AZ_BUFSIZE) size = (int)offset;
size = azread(s, s->outbuf, size, &error);
if (error <= 0) return -1L;
......@@ -687,56 +751,13 @@ int azclose (azio_stream *s)
if (s == NULL) return Z_STREAM_ERROR;
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
return Z_STREAM_ERROR;
#else
if (s->mode == 'w')
{
if (do_flush (s, Z_FINISH) != Z_OK)
return destroy(s);
putLong(s->file, s->crc);
putLong(s->file, (uLong)(s->in & 0xffffffff));
#endif
}
return destroy(s);
}
/*
  Parses the meta block of an az file from an in-memory buffer.

  meta_start      raw bytes of the meta block (rows, check point,
                  auto_increment, forced flushes, dirty byte — in that order)
  rows            out: current number of rows in the data file
  auto_increment  out: last auto increment value
  forced_flushes  out: number of forced flushes

  Always returns 0 (kept for interface compatibility).
*/
int az_read_meta_block(char *meta_start, unsigned long *rows,
                       unsigned long long *auto_increment,
                       unsigned long long *forced_flushes)
{
  /* Explicit cast: char* -> unsigned char* does not convert implicitly in C++ */
  unsigned char *ptr= (unsigned char *)meta_start;
  ulonglong check_point;
  DBUG_ENTER("az_read_meta_block"); /* was mislabelled "ha_archive::read_meta_file" */

  /*
    Parse out the meta data, we ignore version at the moment
  */
  *rows= (unsigned long)uint8korr(ptr); /* cast matches *rows' type */
  ptr+= sizeof(unsigned long long); // Move past rows
  check_point= uint8korr(ptr);
  ptr+= sizeof(unsigned long long); // Move past check_point
  *auto_increment= uint8korr(ptr);
  ptr+= sizeof(unsigned long long); // Move past auto_increment
  *forced_flushes= uint8korr(ptr);
  ptr+= sizeof(unsigned long long); // Move past forced_flush

  DBUG_PRINT("az_read_meta_block", ("Rows %llu",
                                    (long long unsigned)*rows));
  DBUG_PRINT("az_read_meta_block", ("Checkpoint %llu",
                                    (long long unsigned) check_point));
  DBUG_PRINT("az_read_meta_block", ("Auto-Increment %llu",
                                    (long long unsigned)*auto_increment));
  DBUG_PRINT("az_read_meta_block", ("Forced Flushes %llu",
                                    (long long unsigned)*forced_flushes));
  /* ptr now points at the dirty byte that follows the four 8-byte fields */
  DBUG_PRINT("az_read_meta_block", ("Dirty %d", (int)(*ptr)));

  DBUG_RETURN(0);
}
......@@ -36,6 +36,7 @@
#include <zlib.h>
#include "../../mysys/mysys_priv.h"
#include <my_dir.h>
#ifdef __cplusplus
extern "C" {
......@@ -45,11 +46,24 @@ extern "C" {
/*
ulonglong + ulonglong + ulonglong + ulonglong + uchar
*/
#define AZMETA_BUFFER_SIZE sizeof(ulonglong) \
+ sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) \
+ sizeof(uchar)
#define AZHEADER_SIZE 16
#define AZMETA_BUFFER_SIZE sizeof(unsigned long long) \
+ sizeof(unsigned long long) + sizeof(unsigned long long) + sizeof(unsigned long long) \
+ sizeof(unsigned char)
#define AZHEADER_SIZE 20
#define AZ_MAGIC_POS 0
#define AZ_VERSION_POS 1
#define AZ_BLOCK_POS 2
#define AZ_STRATEGY_POS 3
#define AZ_FRM_POS 4
#define AZ_META_POS 8
#define AZ_START_POS 12
#define AZ_ROW_POS 20
#define AZ_FLUSH_POS 28
#define AZ_CHECK_POS 36
#define AZ_AUTOINCREMENT_POS 44
#define AZ_DIRTY_POS 52
/*
The 'zlib' compression library provides in-memory compression and
......@@ -164,7 +178,7 @@ extern "C" {
/* The deflate compression method (the only one supported in this version) */
#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
#define Z_BUFSIZE 16384
#define AZ_BUFSIZE 16384
typedef struct azio_stream {
......@@ -172,8 +186,8 @@ typedef struct azio_stream {
int z_err; /* error code for last stream operation */
int z_eof; /* set if end of input file */
File file; /* .gz file */
Byte inbuf[Z_BUFSIZE]; /* input buffer */
Byte outbuf[Z_BUFSIZE]; /* output buffer */
Byte inbuf[AZ_BUFSIZE]; /* input buffer */
Byte outbuf[AZ_BUFSIZE]; /* output buffer */
uLong crc; /* crc32 of uncompressed data */
char *msg; /* error message */
int transparent; /* 1 if input file is not a .gz file */
......@@ -184,6 +198,12 @@ typedef struct azio_stream {
int back; /* one character push-back */
int last; /* true if push-back is last character */
unsigned char version; /* Version */
unsigned int block_size; /* Block Size */
unsigned long long check_point; /* Last position we checked */
unsigned long long forced_flushes; /* Forced Flushes */
unsigned long long rows; /* rows */
unsigned long long auto_increment; /* auto increment field */
unsigned char dirty; /* State of file */
} azio_stream;
/* basic functions */
......@@ -219,7 +239,7 @@ int azdopen(azio_stream *s,File fd, int Flags);
*/
extern unsigned long azread ( azio_stream *s, voidp buf, unsigned long len, int *error);
extern unsigned int azread ( azio_stream *s, voidp buf, unsigned int len, int *error);
/*
Reads the given number of uncompressed bytes from the compressed file.
If the input file was not in gzip format, gzread copies the given number
......@@ -227,10 +247,10 @@ extern unsigned long azread ( azio_stream *s, voidp buf, unsigned long len, int
gzread returns the number of uncompressed bytes actually read (0 for
end of file, -1 for error). */
extern unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len);
extern unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len);
/*
Writes the given number of uncompressed bytes into the compressed file.
gzwrite returns the number of uncompressed bytes actually written
azwrite returns the number of uncompressed bytes actually written
(0 in case of error).
*/
......
......@@ -121,7 +121,7 @@ static HASH archive_open_tables;
/* The file extension */
#define ARZ ".ARZ" // The data file
#define ARN ".ARN" // Files used during an optimize call
#define ARM ".ARM" // Meta file
#define ARM ".ARM" // Meta file (deprecated)
/*
uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + FN_REFLEN
+ uchar
......@@ -281,127 +281,6 @@ int ha_archive::read_data_header(azio_stream *file_to_read)
DBUG_RETURN(0);
}
/*
  This method reads the header of a meta file and returns whether or not it was successful.
  *rows will contain the current number of rows in the data file upon success.

  Layout parsed (sequential, fixed offsets):
    2 bytes  check header byte + version byte
    8 bytes  rows
    8 bytes  check point (read but only traced)
    8 bytes  auto_increment
    8 bytes  forced_flushes
    FN_REFLEN bytes  optional real path of the data file
    1 byte   dirty flag

  Returns 0 on success, -1 on a short read, and
  HA_ERR_CRASHED_ON_USAGE when the check header byte is wrong or the
  dirty flag is set.
*/
int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
                               uint *meta_version,
                               ulonglong *auto_increment,
                               ulonglong *forced_flushes,
                               char *real_path)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  uchar *ptr= meta_buffer;
  ulonglong check_point;

  DBUG_ENTER("ha_archive::read_meta_file");

  /* Meta data always lives at the start of the meta file */
  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
    DBUG_RETURN(-1);

  /*
    Parse out the meta data, we ignore version at the moment
  */

  ptr+= sizeof(uchar)*2; // Move past header
  *rows= (ha_rows)uint8korr(ptr);
  ptr+= sizeof(ulonglong); // Move past rows
  check_point= uint8korr(ptr);
  ptr+= sizeof(ulonglong); // Move past check_point
  *auto_increment= uint8korr(ptr);
  ptr+= sizeof(ulonglong); // Move past auto_increment
  *forced_flushes= uint8korr(ptr);
  ptr+= sizeof(ulonglong); // Move past forced_flush

  /* An empty path slot is all-zero bytes, so real_path is usable either way */
  memmove(real_path, ptr, FN_REFLEN);
  ptr+= FN_REFLEN; // Move past the possible location of the file

  DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
  DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu",
                                            (long long unsigned)*rows));
  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu",
                                            (long long unsigned) check_point));
  DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu",
                                            (long long unsigned)*auto_increment));
  DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu",
                                            (long long unsigned)*forced_flushes));
  DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
  DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));

  *meta_version= (uchar)meta_buffer[1];
  /* A dirty flag here means the data file was not closed cleanly */
  if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
      ((bool)(*ptr)== TRUE))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /* NOTE(review): syncing a file we only read from looks redundant — confirm */
  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}
/*
  This method writes out the header of a meta file and returns whether or not it was successful.
  By setting dirty you say whether or not the file represents the actual state of the data file.
  Upon ::open() we set to dirty, and upon ::close() we set to clean.

  Layout written (must mirror read_meta_file):
    check header byte, version byte, rows(8), check_point(8),
    auto_increment(8), forced_flushes(8), real_path(FN_REFLEN), dirty(1).

  Returns 0 on success, -1 on a short write.
*/
int ha_archive::write_meta_file(File meta_file, ha_rows rows,
                                ulonglong auto_increment,
                                ulonglong forced_flushes,
                                char *real_path,
                                bool dirty)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  uchar *ptr= meta_buffer;
  ulonglong check_point= 0; //Reserved for the future

  DBUG_ENTER("ha_archive::write_meta_file");

  *ptr= (uchar)ARCHIVE_CHECK_HEADER;
  ptr += sizeof(uchar);
  *ptr= (uchar)ARCHIVE_VERSION;
  ptr += sizeof(uchar);
  int8store(ptr, (ulonglong)rows);
  ptr += sizeof(ulonglong);
  int8store(ptr, check_point);
  ptr += sizeof(ulonglong);
  int8store(ptr, auto_increment);
  ptr += sizeof(ulonglong);
  int8store(ptr, forced_flushes);
  ptr += sizeof(ulonglong);
  // No matter what, we pad with nulls
  /*
    NOTE(review): strncpy does not null-terminate when strlen(real_path)
    >= FN_REFLEN; the reader memmove()s the full slot so this only matters
    if the slot is ever treated as a C string — confirm path lengths are
    bounded below FN_REFLEN.
  */
  if (real_path)
    strncpy((char *)ptr, real_path, FN_REFLEN);
  else
    bzero(ptr, FN_REFLEN);
  ptr += FN_REFLEN;
  *ptr= (uchar)dirty;

  DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
                                             (uint)ARCHIVE_CHECK_HEADER));
  DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
                                             (uint)ARCHIVE_VERSION));
  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu",
                                             (unsigned long long)rows));
  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu",
                                             (unsigned long long)check_point));
  DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
                                             (unsigned long long)auto_increment));
  DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
                                             (unsigned long long)forced_flushes));
  DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s",
                                             real_path));
  DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));

  /* Meta data is always rewritten in full at the start of the file */
  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
    DBUG_RETURN(-1);

  /* Flush to disk so a crash cannot leave a stale meta file */
  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}
/*
We create the shared memory space that we will use for the open table.
......@@ -414,9 +293,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
TABLE *table, int *rc)
{
ARCHIVE_SHARE *share;
char meta_file_name[FN_REFLEN];
uint length;
char *tmp_name;
DBUG_ENTER("ha_archive::get_share");
pthread_mutex_lock(&archive_mutex);
......@@ -426,6 +303,9 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
(byte*) table_name,
length)))
{
char *tmp_name;
char tmp_file_name[FN_REFLEN];
azio_stream archive_tmp;
if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
&share, sizeof(*share),
&tmp_name, length+1,
......@@ -443,34 +323,29 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
share->archive_write_open= FALSE;
fn_format(share->data_file_name, table_name, "",
ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(meta_file_name, table_name, "", ARM,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
strmov(share->table_name,table_name);
/*
We will use this lock for rows.
*/
VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
share->crashed= TRUE;
DBUG_PRINT("ha_archive", ("archive opening (1) up write at %s",
share->data_file_name));
/*
We read the meta file, but do not mark it dirty unless we actually do
a write.
*/
if (read_meta_file(share->meta_file, &share->rows_recorded,
&share->meta_version,
&share->auto_increment_value,
&share->forced_flushes,
share->real_path))
share->crashed= TRUE;
/*
Since we now possibly no real_path, we will use it instead if it exists.
We read the meta file, but do not mark it dirty. Since we are not
doing a write we won't mark it dirty (and we won't open it for
anything but reading... open it for write and we will generate null
compression writes).
*/
if (*share->real_path)
fn_format(share->data_file_name, share->real_path, "", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY)))
{
DBUG_RETURN(NULL);
}
stats.auto_increment_value= archive_tmp.auto_increment;
share->rows_recorded= archive_tmp.rows;
share->crashed= archive_tmp.dirty;
azclose(&archive_tmp);
VOID(my_hash_insert(&archive_open_tables, (byte*) share));
thr_lock_init(&share->lock);
}
......@@ -490,20 +365,20 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
Free the share.
See ha_example.cc for a description.
*/
int ha_archive::free_share(ARCHIVE_SHARE *share)
int ha_archive::free_share(ARCHIVE_SHARE *share_to_free)
{
int rc= 0;
DBUG_ENTER("ha_archive::free_share");
DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles on entrance",
share->table_name_length, share->table_name,
share->use_count));
share_to_free->table_name_length, share_to_free->table_name,
share_to_free->use_count));
pthread_mutex_lock(&archive_mutex);
if (!--share->use_count)
if (!--share_to_free->use_count)
{
hash_delete(&archive_open_tables, (byte*) share);
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
hash_delete(&archive_open_tables, (byte*) share_to_free);
thr_lock_delete(&share_to_free->lock);
VOID(pthread_mutex_destroy(&share_to_free->mutex));
/*
We need to make sure we don't reset the crashed state.
If we open a crashed file, wee need to close it as crashed unless
......@@ -511,18 +386,12 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
Since we will close the data down after this, we go on and count
the flush on close;
*/
share->forced_flushes++;
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value,
share->forced_flushes,
share->real_path,
share->crashed ? TRUE :FALSE);
if (share->archive_write_open)
if (azclose(&(share->archive_write)))
if (share_to_free->archive_write_open)
{
if (azclose(&(share_to_free->archive_write)))
rc= 1;
if (my_close(share->meta_file, MYF(0)))
rc= 1;
my_free((gptr) share, MYF(0));
}
my_free((gptr) share_to_free, MYF(0));
}
pthread_mutex_unlock(&archive_mutex);
......@@ -532,19 +401,13 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
int ha_archive::init_archive_writer()
{
DBUG_ENTER("ha_archive::init_archive_writer");
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value,
share->forced_flushes,
share->real_path,
TRUE);
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
that is shared amoung all open tables.
*/
if (!(azopen(&(share->archive_write), share->data_file_name,
O_WRONLY|O_APPEND|O_BINARY)))
O_RDWR|O_BINARY)))
{
DBUG_PRINT("ha_archive", ("Could not open archive write file"));
share->crashed= TRUE;
......@@ -561,7 +424,6 @@ int ha_archive::init_archive_writer()
*/
static const char *ha_archive_exts[] = {
ARZ,
ARM,
NullS
};
......@@ -674,15 +536,16 @@ int ha_archive::close(void)
int ha_archive::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
File create_file; // We use to create the datafile and the metafile
char name_buff[FN_REFLEN];
char linkname[FN_REFLEN];
int error;
azio_stream create_stream; /* Archive file we are working with */
DBUG_ENTER("ha_archive::create");
stats.auto_increment_value= (create_info->auto_increment_value ?
create_info->auto_increment_value -1 :
(ulonglong) 0);
(ulonglong) 0);
for (uint key= 0; key < table_arg->s->keys; key++)
{
......@@ -703,25 +566,11 @@ int ha_archive::create(const char *name, TABLE *table_arg,
}
}
if ((create_file= my_create(fn_format(name_buff,name,"",ARM,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{
error= my_errno;
goto error;
}
write_meta_file(create_file, 0, stats.auto_increment_value, 0,
(char *)create_info->data_file_name,
FALSE);
my_close(create_file,MYF(0));
/*
We reuse name_buff since it is available.
*/
if (create_info->data_file_name)
{
char linkname[FN_REFLEN];
DBUG_PRINT("ha_archive", ("archive will create stream file %s",
create_info->data_file_name));
......@@ -729,29 +578,26 @@ int ha_archive::create(const char *name, TABLE *table_arg,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(linkname, name, "", ARZ,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
if ((create_file= my_create_with_symlink(linkname, name_buff, 0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{
error= my_errno;
goto error;
}
}
else
{
if ((create_file= my_create(fn_format(name_buff, name,"", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{
error= my_errno;
goto error;
}
fn_format(name_buff, name,"", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
linkname[0]= 0;
}
if (!azdopen(&create_stream, create_file, O_WRONLY|O_BINARY))
if (!(azopen(&create_stream, linkname[0] ? linkname : name_buff, O_CREAT|O_RDWR|O_BINARY)))
{
error= errno;
goto error2;
}
/*
Yes you need to do this, because the starting value
for the autoincrement may not be zero.
*/
create_stream.auto_increment= stats.auto_increment_value;
if (azclose(&create_stream))
{
error= errno;
......@@ -761,7 +607,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
DBUG_RETURN(0);
error2:
my_close(create_file, MYF(0));
azclose(&create_stream);
delete_table(name);
error:
/* Return error number, if we got one */
......@@ -775,29 +621,16 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
{
my_off_t written;
uint *ptr, *end;
unsigned long r_pack_length;
byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; // Longest possible row length with blobs
unsigned int r_pack_length;
DBUG_ENTER("ha_archive::real_write_row");
// We pack the row for writing
r_pack_length= pack_row(buf);
DBUG_PRINT("ha_archive",("Pack row length %lu", r_pack_length));
// Store the size of the row before the row
bzero(size_buffer, ARCHIVE_ROW_HEADER_SIZE);
int4store(size_buffer, (int)r_pack_length);
written= azwrite(writer, size_buffer, ARCHIVE_ROW_HEADER_SIZE);
if (written != ARCHIVE_ROW_HEADER_SIZE)
{
DBUG_PRINT("ha_archive", ("Died writing row header"));
DBUG_RETURN(-1);
}
written= azwrite(writer, record_buffer->buffer, r_pack_length);
if (written != r_pack_length)
{
DBUG_PRINT("ha_archive", ("Wrote %llu bytes expected %lu",
DBUG_PRINT("ha_archive", ("Wrote %llu bytes expected %u",
(unsigned long long) written,
r_pack_length));
DBUG_RETURN(-1);
......@@ -815,6 +648,7 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
int ha_archive::max_row_length(const byte *buf)
{
ulonglong length= table->s->reclength + table->s->fields*2;
length+= ARCHIVE_ROW_HEADER_SIZE;
uint *ptr, *end;
for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
......@@ -829,7 +663,7 @@ int ha_archive::max_row_length(const byte *buf)
}
unsigned long ha_archive::pack_row(const byte *record)
unsigned int ha_archive::pack_row(const byte *record)
{
byte *ptr;
......@@ -841,15 +675,23 @@ unsigned long ha_archive::pack_row(const byte *record)
DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
}
/* Copy null bits */
memcpy(record_buffer->buffer, record, table->s->null_bytes);
ptr= record_buffer->buffer + table->s->null_bytes;
memcpy(record_buffer->buffer+ARCHIVE_ROW_HEADER_SIZE,
record, table->s->null_bytes);
ptr= record_buffer->buffer + table->s->null_bytes + ARCHIVE_ROW_HEADER_SIZE;
for (Field **field=table->field ; *field ; field++)
ptr=(byte*) (*field)->pack((char*) ptr,
(char*) record + (*field)->offset());
DBUG_RETURN((unsigned long) (ptr - record_buffer->buffer));
int4store(record_buffer->buffer, (int)(ptr - record_buffer->buffer -
ARCHIVE_ROW_HEADER_SIZE));
DBUG_PRINT("ha_archive",("Pack row length %u", (unsigned int)
(ptr - record_buffer->buffer -
ARCHIVE_ROW_HEADER_SIZE)));
DBUG_RETURN((unsigned int) (ptr - record_buffer->buffer));
}
......@@ -886,19 +728,23 @@ int ha_archive::write_row(byte *buf)
temp_auto= table->next_number_field->val_int();
/*
Bad news, this will cause a search for the unique value which is very
expensive since we will have to do a table scan which will lock up
all other writers during this period. This could perhaps be optimized
in the future.
Simple optimization to see if we fail for duplicate key immediatly
because we have just given out this value.
*/
if (temp_auto == share->auto_increment_value &&
if (temp_auto == share->archive_write.auto_increment &&
mkey->flags & HA_NOSAME)
{
rc= HA_ERR_FOUND_DUPP_KEY;
goto error;
}
if (temp_auto < share->auto_increment_value &&
/*
Bad news, this will cause a search for the unique value which is very
expensive since we will have to do a table scan which will lock up
all other writers during this period. This could perhaps be optimized
in the future.
*/
if (temp_auto < share->archive_write.auto_increment &&
mkey->flags & HA_NOSAME)
{
/*
......@@ -915,7 +761,6 @@ int ha_archive::write_row(byte *buf)
data
*/
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/*
Set the position of the local read thread to the beginning postion.
*/
......@@ -939,8 +784,8 @@ int ha_archive::write_row(byte *buf)
}
else
{
if (temp_auto > share->auto_increment_value)
stats.auto_increment_value= share->auto_increment_value= temp_auto;
if (temp_auto > share->archive_write.auto_increment)
stats.auto_increment_value= share->archive_write.auto_increment= temp_auto;
}
}
......@@ -969,7 +814,7 @@ void ha_archive::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong *nb_reserved_values)
{
*nb_reserved_values= 1;
*first_value= share->auto_increment_value + 1;
*first_value= share->archive_write.auto_increment + 1;
}
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
......@@ -1014,7 +859,6 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
*/
pthread_mutex_lock(&share->mutex);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
pthread_mutex_unlock(&share->mutex);
/*
......@@ -1093,7 +937,6 @@ int ha_archive::rnd_init(bool scan)
{
DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
......@@ -1131,10 +974,12 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
/* Reallocate buffer if needed */
bool ha_archive::fix_rec_buff(int length)
{
if (! record_buffer->buffer || length > record_buffer->length)
if (! record_buffer->buffer ||
length > (record_buffer->length + ARCHIVE_ROW_HEADER_SIZE))
{
byte *newptr;
if (!(newptr=(byte*) my_realloc((gptr) record_buffer->buffer, length,
if (!(newptr=(byte*) my_realloc((gptr) record_buffer->buffer,
length + ARCHIVE_ROW_HEADER_SIZE,
MYF(MY_ALLOW_ZERO_PTR))))
return 1; /* purecov: inspected */
record_buffer->buffer= newptr;
......@@ -1147,10 +992,10 @@ int ha_archive::unpack_row(azio_stream *file_to_read, char *record)
{
DBUG_ENTER("ha_archive::unpack_row");
unsigned long read;
unsigned int read;
int error;
byte size_buffer[ARCHIVE_ROW_HEADER_SIZE];
unsigned long row_len;
unsigned int row_len;
/* First we grab the length stored */
read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error);
......@@ -1162,13 +1007,15 @@ int ha_archive::unpack_row(azio_stream *file_to_read, char *record)
if (read == 0 || read != ARCHIVE_ROW_HEADER_SIZE)
DBUG_RETURN(HA_ERR_END_OF_FILE);
row_len= sint4korr(size_buffer);
DBUG_PRINT("ha_archive",("Unpack row length %lu -> %lu", row_len,
(unsigned long)table->s->reclength));
row_len= uint4korr(size_buffer);
DBUG_PRINT("ha_archive",("Unpack row length %u -> %u", row_len,
(unsigned int)table->s->reclength));
fix_rec_buff(row_len);
read= azread(file_to_read, record_buffer->buffer, row_len, &error);
DBUG_ASSERT(row_len == read);
if (read != row_len || error)
{
DBUG_RETURN(-1);
......@@ -1195,7 +1042,7 @@ int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf)
int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf)
{
unsigned long read;
unsigned int read;
int error;
uint *ptr, *end;
char *last;
......@@ -1207,9 +1054,9 @@ int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf)
if (read != table->s->reclength)
{
DBUG_PRINT("ha_archive::get_row_version2", ("Read %lu bytes expected %lu",
DBUG_PRINT("ha_archive::get_row_version2", ("Read %u bytes expected %u",
read,
(unsigned long)table->s->reclength));
(unsigned int)table->s->reclength));
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
......@@ -1362,26 +1209,31 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
char writer_filename[FN_REFLEN];
/* Open up the writer if we haven't yet */
if (!share->archive_write_open)
if (share->archive_write_open)
{
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
}
else
{
init_archive_writer();
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
}
/* Lets create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if (!(azopen(&writer, writer_filename, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY)))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/*
An extended rebuild is a lot more effort. We open up each row and re-record it.
Any dead rows are removed (aka rows that may have been partially recorded).
*/
if (check_opt->flags == T_EXTEND)
As of Archive format 3, this is the only type that is performed, before this
version it was just done on T_EXTEND
*/
if (1)
{
DBUG_PRINT("ha_archive", ("archive extended rebuild"));
byte *buf;
......@@ -1421,26 +1273,35 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
if (!rc)
{
share->rows_recorded= 0;
stats.auto_increment_value= share->auto_increment_value= 0;
stats.auto_increment_value= share->archive_write.auto_increment= 0;
record_buffer= read_buffer;
while (!(rc= get_row(&archive, buf)))
{
record_buffer= write_buffer;
real_write_row(buf, &writer);
/*
Long term it should be possible to optimize this so that
it is not called on each row.
*/
if (table->found_next_number_field)
{
Field *field= table->found_next_number_field;
ulonglong auto_value=
(ulonglong) field->val_int((char*)(buf + field->offset()));
if (share->auto_increment_value < auto_value)
stats.auto_increment_value= share->auto_increment_value=
(ulonglong) field->val_int((char*)
(buf + field->offset()));
DBUG_PRINT("ha_archive::optimize", ("Value %llu\n", (unsigned long long)auto_value));
if (share->archive_write.auto_increment < auto_value)
stats.auto_increment_value= share->archive_write.auto_increment=
auto_value;
}
share->rows_recorded++;
record_buffer= read_buffer;
}
share->rows_recorded= archive.rows;
stats.auto_increment_value= share->archive_write.auto_increment=
writer.auto_increment= archive.auto_increment;
DBUG_PRINT("ha_archive", ("auto to save %llu", writer.auto_increment));
}
DBUG_PRINT("ha_archive", ("recovered %llu archive rows",
......@@ -1453,53 +1314,9 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
if (rc && rc != HA_ERR_END_OF_FILE)
goto error;
}
else
{
DBUG_PRINT("ha_archive", ("archive quick rebuild"));
/*
The quick method is to just read the data raw, and then compress it directly.
*/
unsigned long read, written;
int error;
char block[IO_SIZE];
if (azrewind(&archive) == -1)
{
rc= HA_ERR_CRASHED_ON_USAGE;
DBUG_PRINT("ha_archive", ("crashed on rewinding file"));
goto error;
}
while ((read= azread(&archive, block, IO_SIZE, &error)) > 0)
{
if (error)
{
rc= HA_ERR_CRASHED_ON_USAGE;
DBUG_PRINT("ha_archive", ("azread error on read"));
goto error;
}
written= azwrite(&writer, block, read);
if (written != read)
{
rc= HA_ERR_CRASHED_ON_USAGE;
DBUG_PRINT("ha_archive::real_write_row",
("Crashed wrote %lu bytes expected %lu",
written, read));
goto error;
}
}
if (error)
{
rc= HA_ERR_CRASHED_ON_USAGE;
DBUG_PRINT("ha_archive", ("retrieved zero blocks and error'ed"));
goto error;
}
}
azclose(&writer);
share->dirty= FALSE;
share->forced_flushes= 0;
// now we close both our writer and our reader for the rename
azclose(&(share->archive_write));
......@@ -1517,7 +1334,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
*/
DBUG_PRINT("ha_archive", ("Reopening archive data file"));
if (!azopen(&(share->archive_write), share->data_file_name,
O_WRONLY|O_APPEND|O_BINARY) ||
O_RDWR|O_BINARY) ||
!azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))
{
DBUG_PRINT("ha_archive", ("Could not open archive write file"));
......@@ -1583,8 +1400,10 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
{
create_info->auto_increment_value= stats.auto_increment_value;
}
if (*share->real_path)
#ifdef DISABLED
if (share->real_path)
create_info->data_file_name= share->real_path;
#endif
}
......@@ -1616,8 +1435,10 @@ int ha_archive::info(uint flag)
stats.delete_length= 0;
stats.index_file_length=0;
/*
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= share->auto_increment_value;
stats.auto_increment_value= share->archive_write.auto_increment;
*/
DBUG_RETURN(0);
}
......@@ -1685,7 +1506,6 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
old_proc_info= thd_proc_info(thd, "Checking table");
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/*
First we create a buffer that we can use for reading rows, and can pass
......
......@@ -39,17 +39,12 @@ typedef struct st_archive_share {
uint table_name_length,use_count;
pthread_mutex_t mutex;
THR_LOCK lock;
File meta_file; /* Meta file we use */
azio_stream archive_write; /* Archive file we are working with */
bool archive_write_open;
bool dirty; /* Flag for if a flush should occur */
bool crashed; /* Meta file is crashed */
ha_rows rows_recorded; /* Number of rows in tables */
ulonglong auto_increment_value;
ulonglong forced_flushes;
ulonglong mean_rec_length;
char real_path[FN_REFLEN];
uint meta_version;
} ARCHIVE_SHARE;
/*
......@@ -121,16 +116,6 @@ class ha_archive: public handler
int get_row(azio_stream *file_to_read, byte *buf);
int get_row_version2(azio_stream *file_to_read, byte *buf);
int get_row_version3(azio_stream *file_to_read, byte *buf);
int read_meta_file(File meta_file, ha_rows *rows,
uint *meta_version,
ulonglong *auto_increment,
ulonglong *forced_flushes,
char *real_path);
int write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment,
ulonglong forced_flushes,
char *real_path,
bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
int free_share(ARCHIVE_SHARE *share);
int init_archive_writer();
......@@ -156,6 +141,6 @@ class ha_archive: public handler
int max_row_length(const byte *buf);
bool fix_rec_buff(int length);
int unpack_row(azio_stream *file_to_read, char *record);
unsigned long pack_row(const byte *record);
unsigned int pack_row(const byte *record);
};
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment