Commit 9853e1a9 authored by unknown

Merge baker@bk-internal.mysql.com:/home/bk/mysql-5.1-arch

into  zim.(none):/home/brian/mysql/archive-format-5.1

parents bd27fcf8 8c2c5767
drop table if exists t1,t2,t3;
drop table if exists t1,t2,t3,t4,t5;
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
......
......@@ -6,7 +6,7 @@
-- source include/have_binlog_format_mixed_or_statement.inc
--disable_warnings
drop table if exists t1,t2,t3;
drop table if exists t1,t2,t3,t4,t5;
--enable_warnings
CREATE TABLE t1 (
......
......@@ -30,7 +30,7 @@ LDADD =
DEFS = @DEFS@
noinst_HEADERS = ha_archive.h azlib.h
noinst_PROGRAMS = archive_test
noinst_PROGRAMS = archive_test archive_reader
EXTRA_LTLIBRARIES = ha_archive.la
pkglib_LTLIBRARIES = @plugin_archive_shared_target@
......@@ -55,6 +55,14 @@ archive_test_LDADD = $(top_builddir)/mysys/libmysys.a \
@ZLIB_LIBS@
archive_test_LDFLAGS = @NOINST_LDFLAGS@
archive_reader_SOURCES = archive_reader.c azio.c
archive_reader_CFLAGS = $(AM_CFLAGS)
archive_reader_LDADD = $(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/strings/libmystrings.a \
@ZLIB_LIBS@
archive_reader_LDFLAGS = @NOINST_LDFLAGS@
EXTRA_DIST = CMakeLists.txt plug.in
# Don't update the files from bitkeeper
......
#include "azlib.h"
#include <string.h>
#include <assert.h>
#include <stdio.h>
#define BUFFER_LEN 1024
/*
  archive_reader: dumps the header information of an azio (ARCHIVE
  engine) file named on the command line.

  Exit status: 0 on success or when no file was given, 1 when the file
  could not be opened.
*/
int main(int argc, char *argv[])
{
  unsigned int ret;
  azio_stream reader_handle;

  MY_INIT(argv[0]);

  if (argc < 2)
  {
    printf("No file specified. \n");
    return 0;
  }

  if (!(ret= azopen(&reader_handle, argv[1], O_RDONLY|O_BINARY)))
  {
    /* was "Could not create test file" -- we only ever open for reading,
       and failure must not report success to the shell */
    printf("Could not open %s for reading\n", argv[1]);
    return 1;
  }

  printf("Version :%u\n", reader_handle.version);
  printf("Start position :%llu\n", (unsigned long long)reader_handle.start);
  /* Extended statistics exist only in az-format (version 3+) headers */
  if (reader_handle.version > 2)
  {
    printf("Block size :%u\n", reader_handle.block_size);
    printf("Rows: %llu\n", reader_handle.rows);
    printf("Autoincrement: %llu\n", reader_handle.auto_increment);
    printf("Check Point: %llu\n", reader_handle.check_point);
    printf("Forced Flushes: %llu\n", reader_handle.forced_flushes);
    printf("State: %s\n", ( reader_handle.dirty ? "dirty" : "clean"));
  }

  azclose(&reader_handle);

  return 0;
}
......@@ -14,50 +14,223 @@
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include "azlib.h"
#include <string.h>
#include <assert.h>
#include <stdio.h>
#define TEST_STRING "This is a test"
#define TEST_FILENAME "test.az"
#define TEST_STRING "YOU don't know about me without you have read a book by the name of The Adventures of Tom Sawyer; but that ain't no matter. That book was made by Mr. Mark Twain, and he told the truth, mainly. There was things which he stretched, but mainly he told the truth. That is nothing. I never seen anybody but lied one time or another, without it was Aunt Polly, or the widow, or maybe Mary. Aunt Polly--Tom's Aunt Polly, she is--and Mary, and the Widow Douglas is all told about in that book, which is mostly a true book, with some stretchers, as I said before. Now the way that the book winds up is this: Tom and me found the money that the robbers hid in the cave, and it made us rich. We got six thousand dollars apiece--all gold. It was an awful sight of money when it was piled up. Well, Judge Thatcher he took it and put it out at interest, and it fetched us a dollar a day apiece all the year round --more than a body could tell what to do with. The Widow Douglas she took me for her son, and allowed she would..."
#define TEST_LOOP_NUM 100
#define BUFFER_LEN 1024
#define TWOGIG 2147483648
#define FOURGIG 4294967296
#define EIGHTGIG 8589934592
int main(int argc __attribute__((unused)), char *argv[])
/* prototypes */
int size_test(unsigned long long length, unsigned long long rows_to_test_for);
int main(int argc, char *argv[])
{
int ret;
azio_stream foo, foo1;
unsigned int ret;
int error;
unsigned int x;
int written_rows= 0;
azio_stream writer_handle, reader_handle;
char buffer[BUFFER_LEN];
unlink(TEST_FILENAME);
if (argc > 1)
return 0;
MY_INIT(argv[0]);
if (!(ret= azopen(&foo, "test", O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_RDWR|O_BINARY)))
{
printf("Could not create test file\n");
return 0;
}
azwrite(&foo, TEST_STRING, sizeof(TEST_STRING));
azflush(&foo, Z_FINISH);
if (!(ret= azopen(&foo1, "test", O_RDONLY|O_BINARY)))
if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY)))
{
printf("Could not open test file\n");
return 0;
}
assert(reader_handle.rows == 0);
assert(reader_handle.auto_increment == 0);
assert(reader_handle.check_point == 0);
assert(reader_handle.forced_flushes == 0);
assert(reader_handle.dirty == 1);
for (x= 0; x < TEST_LOOP_NUM; x++)
{
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
written_rows++;
}
azflush(&writer_handle, Z_SYNC_FLUSH);
/* Lets test that our internal stats are good */
assert(writer_handle.rows == TEST_LOOP_NUM);
/* Reader needs to be flushed to make sure it is up to date */
azflush(&reader_handle, Z_SYNC_FLUSH);
assert(reader_handle.rows == TEST_LOOP_NUM);
assert(reader_handle.auto_increment == 0);
assert(reader_handle.check_point == 0);
assert(reader_handle.forced_flushes == 1);
assert(reader_handle.dirty == 1);
writer_handle.auto_increment= 4;
azflush(&writer_handle, Z_SYNC_FLUSH);
assert(writer_handle.rows == TEST_LOOP_NUM);
assert(writer_handle.auto_increment == 4);
assert(writer_handle.check_point == 0);
assert(writer_handle.forced_flushes == 2);
assert(writer_handle.dirty == 1);
if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY)))
{
printf("Could not open test file\n");
return 0;
}
ret= azread(&foo1, buffer, BUFFER_LEN);
printf("Read %d bytes\n", ret);
printf("%s\n", buffer);
azrewind(&foo1);
azclose(&foo);
if (!(ret= azopen(&foo, "test", O_APPEND|O_WRONLY|O_BINARY)))
/* Read the original data */
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(!error);
assert(ret == BUFFER_LEN);
assert(!memcmp(buffer, TEST_STRING, ret));
}
assert(writer_handle.rows == TEST_LOOP_NUM);
/* Test here for falling off the planet */
/* Final Write before closing */
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
/* We don't use FINISH, but I want to have it tested */
azflush(&writer_handle, Z_FINISH);
assert(writer_handle.rows == TEST_LOOP_NUM+1);
/* Read final write */
azrewind(&reader_handle);
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(ret == BUFFER_LEN);
assert(!error);
assert(!memcmp(buffer, TEST_STRING, ret));
}
azclose(&writer_handle);
/* Rewind and full test */
azrewind(&reader_handle);
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(ret == BUFFER_LEN);
assert(!error);
assert(!memcmp(buffer, TEST_STRING, ret));
}
printf("Finished reading\n");
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_RDWR|O_BINARY)))
{
printf("Could not open file (%s) for appending\n", TEST_FILENAME);
return 0;
}
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
assert(ret == BUFFER_LEN);
azflush(&writer_handle, Z_SYNC_FLUSH);
/* Rewind and full test */
azrewind(&reader_handle);
for (x= 0; x < writer_handle.rows; x++)
{
ret= azread(&reader_handle, buffer, BUFFER_LEN, &error);
assert(!error);
assert(ret == BUFFER_LEN);
assert(!memcmp(buffer, TEST_STRING, ret));
}
azclose(&writer_handle);
azclose(&reader_handle);
unlink(TEST_FILENAME);
/* Start size tests */
printf("About to run 2/4/8 gig tests now, you may want to hit CTRL-C\n");
size_test(TWOGIG, 2097152);
size_test(FOURGIG, 4194304);
size_test(EIGHTGIG, 8388608);
return 0;
}
int size_test(unsigned long long length, unsigned long long rows_to_test_for)
{
azio_stream writer_handle, reader_handle;
unsigned long long write_length;
unsigned long long read_length= 0;
unsigned int ret;
char buffer[BUFFER_LEN];
int error;
if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_RDWR|O_TRUNC|O_BINARY)))
{
printf("Could not create test file\n");
return 0;
}
azwrite(&foo, TEST_STRING, sizeof(TEST_STRING));
azflush(&foo, Z_FINISH);
ret= azread(&foo1, buffer, BUFFER_LEN);
printf("Read %d bytes\n", ret);
printf("%s\n", buffer);
azclose(&foo);
azclose(&foo1);
/* unlink("test"); */
for (write_length= 0; write_length < length ; write_length+= ret)
{
ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN);
if (ret != BUFFER_LEN)
{
printf("Size %u\n", ret);
assert(ret != BUFFER_LEN);
}
if ((write_length % 14031) == 0)
{
azflush(&writer_handle, Z_SYNC_FLUSH);
}
}
assert(write_length == length);
azflush(&writer_handle, Z_SYNC_FLUSH);
printf("Reading back data\n");
if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY)))
{
printf("Could not open test file\n");
return 0;
}
while ((ret= azread(&reader_handle, buffer, BUFFER_LEN, &error)))
{
read_length+= ret;
assert(!memcmp(buffer, TEST_STRING, ret));
if (ret != BUFFER_LEN)
{
printf("Size %u\n", ret);
assert(ret != BUFFER_LEN);
}
}
assert(read_length == length);
assert(writer_handle.rows == rows_to_test_for);
azclose(&writer_handle);
azclose(&reader_handle);
unlink(TEST_FILENAME);
return 0;
}
/*
azio is a modified version of gzio. It makes use of mysys and removes mallocs.
-Brian Aker
*/
/* gzio.c -- IO on .gz files
* Copyright (C) 1995-2005 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Compile this file with -DNO_GZCOMPRESS to avoid the compression code.
*/
/* @(#) $Id$ */
......@@ -17,6 +17,7 @@
#include <string.h>
static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */
static int const az_magic[2] = {0xfe, 0x03}; /* az magic header */
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
......@@ -30,9 +31,11 @@ int az_open(azio_stream *s, const char *path, int Flags, File fd);
int do_flush(azio_stream *file, int flush);
int get_byte(azio_stream *s);
void check_header(azio_stream *s);
void write_header(azio_stream *s);
int destroy(azio_stream *s);
void putLong(File file, uLong x);
uLong getLong(azio_stream *s);
void read_header(azio_stream *s, unsigned char *buffer);
/* ===========================================================================
Opens a gzip (.gz) file for reading or writing. The mode parameter
......@@ -52,8 +55,8 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
s->stream.zalloc = (alloc_func)0;
s->stream.zfree = (free_func)0;
s->stream.opaque = (voidpf)0;
memset(s->inbuf, 0, Z_BUFSIZE);
memset(s->outbuf, 0, Z_BUFSIZE);
memset(s->inbuf, 0, AZ_BUFSIZE);
memset(s->outbuf, 0, AZ_BUFSIZE);
s->stream.next_in = s->inbuf;
s->stream.next_out = s->outbuf;
s->stream.avail_in = s->stream.avail_out = 0;
......@@ -65,20 +68,25 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
s->crc = crc32(0L, Z_NULL, 0);
s->transparent = 0;
s->mode = 'r';
s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */
if (Flags & O_WRONLY || Flags & O_APPEND)
/*
We do our own version of append by nature.
We must always have write access to take card of the header.
*/
DBUG_ASSERT(Flags | O_APPEND);
DBUG_ASSERT(Flags | O_WRONLY);
if (Flags & O_RDWR)
s->mode = 'w';
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
err = Z_STREAM_ERROR;
#else
if (s->mode == 'w')
{
err = deflateInit2(&(s->stream), level,
Z_DEFLATED, -MAX_WBITS, 8, strategy);
/* windowBits is passed < 0 to suppress zlib header */
s->stream.next_out = s->outbuf;
#endif
if (err != Z_OK)
{
destroy(s);
......@@ -100,7 +108,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
return Z_NULL;
}
}
s->stream.avail_out = Z_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE;
errno = 0;
s->file = fd < 0 ? my_open(path, Flags, MYF(0)) : fd;
......@@ -110,35 +118,63 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
destroy(s);
return Z_NULL;
}
if (s->mode == 'w') {
char buffer[10];
/* Write a very simple .gz header:
*/
buffer[0] = gz_magic[0];
buffer[1] = gz_magic[1];
buffer[2] = Z_DEFLATED;
buffer[3] = 0 /*flags*/;
buffer[4] = 0;
buffer[5] = 0;
buffer[6] = 0;
buffer[7] = 0 /*time*/;
buffer[8] = 0 /*xflags*/;
buffer[9] = 0x03;
s->start = 10L;
my_write(s->file, buffer, (uint)s->start, MYF(0));
/* We use 10L instead of ftell(s->file) to because ftell causes an
* fflush on some systems. This version of the library doesn't use
* start anyway in write mode, so this initialization is not
* necessary.
*/
} else {
check_header(s); /* skip the .gz header */
s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in;
if (Flags & O_CREAT || Flags & O_TRUNC)
{
s->rows= 0;
s->forced_flushes= 0;
s->auto_increment= 0;
s->check_point= 0;
s->dirty= 1; /* We create the file dirty */
write_header(s);
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
}
else if (s->mode == 'w')
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0));
read_header(s, buffer); /* skip the .az header */
my_seek(s->file, 0, MY_SEEK_END, MYF(0));
}
else
{
check_header(s); /* skip the .az header */
}
return 1;
}
/*
  (Re)writes the az file header at offset 0.

  Resets s->start, s->block_size and s->version, then lays out the fixed
  AZHEADER_SIZE + AZMETA_BUFFER_SIZE byte header: magic, version, block
  size (stored in KB), compression strategy, reserved FRM/meta block
  pointers, and the live statistics (data start offset, row count,
  forced-flush count, check point, auto-increment, dirty flag).
  Uses my_pwrite so the current file position is left untouched.
*/
void write_header(azio_stream *s)
{
  char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
  char *ptr= buffer;
  s->start = AZHEADER_SIZE + AZMETA_BUFFER_SIZE;
  s->block_size= AZ_BUFSIZE;
  s->version = (unsigned char)az_magic[1];
  /* Write a very simple .az header: */
  bzero(buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE);
  *(ptr + AZ_MAGIC_POS)= az_magic[0];
  *(ptr + AZ_VERSION_POS)= (unsigned char)s->version;
  *(ptr + AZ_BLOCK_POS)= (unsigned char)(s->block_size/1024); /* Block size, stored in KB */
  *(ptr + AZ_STRATEGY_POS)= (unsigned char)Z_DEFAULT_STRATEGY; /* Compression Type */
  int4store(ptr + AZ_FRM_POS, 0); /* FRM Block (reserved, unused) */
  int4store(ptr + AZ_META_POS, 0); /* Meta Block (reserved, unused) */
  int8store(ptr + AZ_START_POS, (unsigned long long)s->start); /* Offset of first data block */
  int8store(ptr + AZ_ROW_POS, (unsigned long long)s->rows); /* Number of rows */
  int8store(ptr + AZ_FLUSH_POS, (unsigned long long)s->forced_flushes); /* Forced flush count */
  int8store(ptr + AZ_CHECK_POS, (unsigned long long)s->check_point); /* Last check point */
  int8store(ptr + AZ_AUTOINCREMENT_POS, (unsigned long long)s->auto_increment); /* Auto-increment high-water mark */
  *(ptr + AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Dirty flag: 1 while the file may be out of sync */
  /* Always begin at the beginning, and end there as well */
  my_pwrite(s->file, buffer, (uint)s->start, 0, MYF(0));
}
/* ===========================================================================
Opens a gzip (.gz) file for reading or writing.
*/
......@@ -170,7 +206,7 @@ int get_byte(s)
if (s->stream.avail_in == 0)
{
errno = 0;
s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0));
s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
......@@ -206,7 +242,7 @@ void check_header(azio_stream *s)
if (len < 2) {
if (len) s->inbuf[0] = s->stream.next_in[0];
errno = 0;
len = (uInt)my_read(s->file, (byte *)s->inbuf + len, Z_BUFSIZE >> len, MYF(0));
len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE >> len, MYF(0));
if (len == 0) s->z_err = Z_ERRNO;
s->stream.avail_in += len;
s->stream.next_in = s->inbuf;
......@@ -217,13 +253,11 @@ void check_header(azio_stream *s)
}
/* Peek ahead to check the gzip magic header */
if (s->stream.next_in[0] != gz_magic[0] ||
s->stream.next_in[1] != gz_magic[1]) {
s->transparent = 1;
return;
}
if ( s->stream.next_in[0] == gz_magic[0] && s->stream.next_in[1] == gz_magic[1])
{
s->stream.avail_in -= 2;
s->stream.next_in += 2;
s->version= (unsigned char)2;
/* Check the rest of the gzip header */
method = get_byte(s);
......@@ -252,6 +286,43 @@ void check_header(azio_stream *s)
for (len = 0; len < 2; len++) (void)get_byte(s);
}
s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in;
}
else if ( s->stream.next_in[0] == az_magic[0] && s->stream.next_in[1] == az_magic[1])
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
for (len = 0; len < (AZHEADER_SIZE + AZMETA_BUFFER_SIZE); len++)
buffer[len]= get_byte(s);
s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
read_header(s, buffer);
}
else
{
s->z_err = Z_OK;
return;
}
}
/*
  Populates the stream statistics from a raw az header image.

  `buffer` must hold at least AZHEADER_SIZE + AZMETA_BUFFER_SIZE bytes
  beginning with the az magic; anything else is rejected (debug builds
  assert, release builds leave the stream untouched).
*/
void read_header(azio_stream *s, unsigned char *buffer)
{
  /* Guard: refuse anything that does not start with the az magic */
  if (buffer[0] != az_magic[0] || buffer[1] != az_magic[1])
  {
    DBUG_ASSERT(buffer[0] == az_magic[0] && buffer[1] == az_magic[1]);
    return;
  }

  s->version= (unsigned int)buffer[AZ_VERSION_POS];
  s->block_size= 1024 * buffer[AZ_BLOCK_POS];
  s->start= (unsigned long long)uint8korr(buffer + AZ_START_POS);
  s->rows= (unsigned long long)uint8korr(buffer + AZ_ROW_POS);
  s->check_point= (unsigned long long)uint8korr(buffer + AZ_CHECK_POS);
  s->forced_flushes= (unsigned long long)uint8korr(buffer + AZ_FLUSH_POS);
  s->auto_increment= (unsigned long long)uint8korr(buffer + AZ_AUTOINCREMENT_POS);
  s->dirty= (unsigned int)buffer[AZ_DIRTY_POS];
}
/* ===========================================================================
......@@ -265,11 +336,7 @@ int destroy (s)
if (s->stream.state != NULL) {
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
err = Z_STREAM_ERROR;
#else
err = deflateEnd(&(s->stream));
#endif
}
else if (s->mode == 'r')
{
......@@ -292,15 +359,28 @@ int destroy (s)
Reads the given number of uncompressed bytes from the compressed file.
azread returns the number of bytes actually read (0 for end of file).
*/
int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len)
unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int *error)
{
Bytef *start = (Bytef*)buf; /* starting point for crc computation */
Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */
*error= 0;
if (s->mode != 'r') return Z_STREAM_ERROR;
if (s->mode != 'r')
{
*error= Z_STREAM_ERROR;
return 0;
}
if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) return -1;
if (s->z_err == Z_STREAM_END) return 0; /* EOF */
if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO)
{
*error= s->z_err;
return 0;
}
if (s->z_err == Z_STREAM_END) /* EOF */
{
return 0;
}
next_out = (Byte*)buf;
s->stream.next_out = (Bytef*)buf;
......@@ -315,9 +395,11 @@ int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len)
start++;
if (s->last) {
s->z_err = Z_STREAM_END;
{
return 1;
}
}
}
while (s->stream.avail_out != 0) {
......@@ -342,12 +424,14 @@ int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len)
s->in += len;
s->out += len;
if (len == 0) s->z_eof = 1;
return (int)len;
{
return len;
}
}
if (s->stream.avail_in == 0 && !s->z_eof) {
errno = 0;
s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0));
s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0));
if (s->stream.avail_in == 0)
{
s->z_eof = 1;
......@@ -374,7 +458,8 @@ int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len)
* Check for such files:
*/
check_header(s);
if (s->z_err == Z_OK) {
if (s->z_err == Z_OK)
{
inflateReset(&(s->stream));
s->crc = crc32(0L, Z_NULL, 0);
}
......@@ -386,34 +471,40 @@ int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len)
if (len == s->stream.avail_out &&
(s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO))
return -1;
return (int)(len - s->stream.avail_out);
{
*error= s->z_err;
return 0;
}
return (len - s->stream.avail_out);
}
#ifndef NO_GZCOMPRESS
/* ===========================================================================
Writes the given number of uncompressed bytes into the compressed file.
azwrite returns the number of bytes actually written (0 in case of error).
*/
int azwrite (azio_stream *s, voidpc buf, unsigned len)
unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len)
{
s->stream.next_in = (Bytef*)buf;
s->stream.avail_in = len;
s->rows++;
while (s->stream.avail_in != 0)
{
if (s->stream.avail_out == 0)
{
s->stream.next_out = s->outbuf;
if (my_write(s->file, (byte *)s->outbuf, Z_BUFSIZE, MYF(0)) != Z_BUFSIZE)
if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE, MYF(0)) != AZ_BUFSIZE)
{
s->z_err = Z_ERRNO;
break;
}
s->stream.avail_out = Z_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE;
}
s->in += s->stream.avail_in;
s->out += s->stream.avail_out;
......@@ -424,19 +515,15 @@ int azwrite (azio_stream *s, voidpc buf, unsigned len)
}
s->crc = crc32(s->crc, (const Bytef *)buf, len);
return (int)(len - s->stream.avail_in);
return (unsigned int)(len - s->stream.avail_in);
}
#endif
/* ===========================================================================
Flushes all pending output into the compressed file. The parameter
flush is as in the deflate() function.
*/
int do_flush (s, flush)
azio_stream *s;
int flush;
int do_flush (azio_stream *s, int flush)
{
uInt len;
int done = 0;
......@@ -445,8 +532,9 @@ int do_flush (s, flush)
s->stream.avail_in = 0; /* should be zero already anyway */
for (;;) {
len = Z_BUFSIZE - s->stream.avail_out;
for (;;)
{
len = AZ_BUFSIZE - s->stream.avail_out;
if (len != 0) {
if ((uInt)my_write(s->file, (byte *)s->outbuf, len, MYF(0)) != len)
......@@ -455,7 +543,7 @@ int do_flush (s, flush)
return Z_ERRNO;
}
s->stream.next_out = s->outbuf;
s->stream.avail_out = Z_BUFSIZE;
s->stream.avail_out = AZ_BUFSIZE;
}
if (done) break;
s->out += s->stream.avail_out;
......@@ -472,6 +560,11 @@ int do_flush (s, flush)
if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break;
}
if (flush == Z_FINISH)
s->dirty= 0; /* Mark it clean, we should be good now */
write_header(s);
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
......@@ -479,11 +572,25 @@ int ZEXPORT azflush (s, flush)
azio_stream *s;
int flush;
{
int err = do_flush (s, flush);
int err;
if (s->mode == 'r')
{
unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0));
read_header(s, buffer); /* skip the .az header */
return Z_OK;
}
else
{
s->forced_flushes++;
err= do_flush(s, flush);
if (err) return err;
my_sync(s->file, MYF(0));
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
}
}
/* ===========================================================================
......@@ -525,19 +632,17 @@ my_off_t azseek (s, offset, whence)
return -1L;
}
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
return -1L;
#else
if (whence == SEEK_SET) {
if (s->mode == 'w')
{
if (whence == SEEK_SET)
offset -= s->in;
}
/* At this point, offset is the number of zero bytes to write. */
/* There was a zmemzero here if inbuf was null -Brian */
while (offset > 0) {
uInt size = Z_BUFSIZE;
if (offset < Z_BUFSIZE) size = (uInt)offset;
while (offset > 0)
{
uInt size = AZ_BUFSIZE;
if (offset < AZ_BUFSIZE) size = (uInt)offset;
size = azwrite(s, s->inbuf, size);
if (size == 0) return -1L;
......@@ -545,7 +650,6 @@ my_off_t azseek (s, offset, whence)
offset -= size;
}
return s->in;
#endif
}
/* Rest of function is for reading only */
......@@ -580,11 +684,12 @@ my_off_t azseek (s, offset, whence)
if (s->last) s->z_err = Z_STREAM_END;
}
while (offset > 0) {
int size = Z_BUFSIZE;
if (offset < Z_BUFSIZE) size = (int)offset;
int error;
unsigned int size = AZ_BUFSIZE;
if (offset < AZ_BUFSIZE) size = (int)offset;
size = azread(s, s->outbuf, (uInt)size);
if (size <= 0) return -1L;
size = azread(s, s->outbuf, size, &error);
if (error <= 0) return -1L;
offset -= size;
}
return s->out;
......@@ -644,16 +749,14 @@ int azclose (azio_stream *s)
if (s == NULL) return Z_STREAM_ERROR;
if (s->mode == 'w') {
#ifdef NO_GZCOMPRESS
return Z_STREAM_ERROR;
#else
if (s->mode == 'w')
{
if (do_flush (s, Z_FINISH) != Z_OK)
return destroy(s);
putLong(s->file, s->crc);
putLong(s->file, (uLong)(s->in & 0xffffffff));
#endif
}
return destroy(s);
}
/*
This libary has been modified for use by the MySQL Archive Engine.
-Brian Aker
*/
/* zlib.h -- interface of the 'zlib' general purpose compression library
version 1.2.3, July 18th, 2005
......@@ -34,10 +36,34 @@
#include <zlib.h>
#include "../../mysys/mysys_priv.h"
#include <my_dir.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Start of MySQL Specific Information */
/*
ulonglong + ulonglong + ulonglong + ulonglong + uchar
*/
#define AZMETA_BUFFER_SIZE sizeof(unsigned long long) \
+ sizeof(unsigned long long) + sizeof(unsigned long long) + sizeof(unsigned long long) \
+ sizeof(unsigned char)
#define AZHEADER_SIZE 20
#define AZ_MAGIC_POS 0
#define AZ_VERSION_POS 1
#define AZ_BLOCK_POS 2
#define AZ_STRATEGY_POS 3
#define AZ_FRM_POS 4
#define AZ_META_POS 8
#define AZ_START_POS 12
#define AZ_ROW_POS 20
#define AZ_FLUSH_POS 28
#define AZ_CHECK_POS 36
#define AZ_AUTOINCREMENT_POS 44
#define AZ_DIRTY_POS 52
/*
The 'zlib' compression library provides in-memory compression and
......@@ -152,7 +178,7 @@ extern "C" {
/* The deflate compression method (the only one supported in this version) */
#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
#define Z_BUFSIZE 16384
#define AZ_BUFSIZE 16384
typedef struct azio_stream {
......@@ -160,8 +186,8 @@ typedef struct azio_stream {
int z_err; /* error code for last stream operation */
int z_eof; /* set if end of input file */
File file; /* .gz file */
Byte inbuf[Z_BUFSIZE]; /* input buffer */
Byte outbuf[Z_BUFSIZE]; /* output buffer */
Byte inbuf[AZ_BUFSIZE]; /* input buffer */
Byte outbuf[AZ_BUFSIZE]; /* output buffer */
uLong crc; /* crc32 of uncompressed data */
char *msg; /* error message */
int transparent; /* 1 if input file is not a .gz file */
......@@ -171,6 +197,13 @@ typedef struct azio_stream {
my_off_t out; /* bytes out of deflate or inflate */
int back; /* one character push-back */
int last; /* true if push-back is last character */
unsigned char version; /* Version */
unsigned int block_size; /* Block Size */
unsigned long long check_point; /* Last position we checked */
unsigned long long forced_flushes; /* Forced Flushes */
unsigned long long rows; /* rows */
unsigned long long auto_increment; /* auto increment field */
unsigned char dirty; /* State of file */
} azio_stream;
/* basic functions */
......@@ -206,7 +239,7 @@ int azdopen(azio_stream *s,File fd, int Flags);
*/
extern int azread(azio_stream *file, voidp buf, unsigned len);
extern unsigned int azread ( azio_stream *s, voidp buf, unsigned int len, int *error);
/*
Reads the given number of uncompressed bytes from the compressed file.
If the input file was not in gzip format, gzread copies the given number
......@@ -214,10 +247,10 @@ extern int azread(azio_stream *file, voidp buf, unsigned len);
gzread returns the number of uncompressed bytes actually read (0 for
end of file, -1 for error). */
extern int azwrite (azio_stream *file, voidpc buf, unsigned len);
extern unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len);
/*
Writes the given number of uncompressed bytes into the compressed file.
gzwrite returns the number of uncompressed bytes actually written
azwrite returns the number of uncompressed bytes actually written
(0 in case of error).
*/
......
......@@ -120,7 +120,7 @@ static HASH archive_open_tables;
/* The file extension */
#define ARZ ".ARZ" // The data file
#define ARN ".ARN" // Files used during an optimize call
#define ARM ".ARM" // Meta file
#define ARM ".ARM" // Meta file (deprecated)
/*
uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + FN_REFLEN
+ uchar
......@@ -145,6 +145,11 @@ static handler *archive_create_handler(handlerton *hton,
*/
#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2
/*
Size of header used for row
*/
#define ARCHIVE_ROW_HEADER_SIZE 4
static handler *archive_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root)
......@@ -236,158 +241,42 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
*/
int ha_archive::read_data_header(azio_stream *file_to_read)
{
int error;
unsigned long ret;
uchar data_buffer[DATA_BUFFER_SIZE];
DBUG_ENTER("ha_archive::read_data_header");
if (azrewind(file_to_read) == -1)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
if (azread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
DBUG_RETURN(errno ? errno : -1);
DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1]));
if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) &&
(data_buffer[1] != (uchar)ARCHIVE_VERSION))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
if (file_to_read->version >= 3)
DBUG_RETURN(0);
}
/*
This method writes out the header of a datafile and returns whether or not it was successful.
*/
int ha_archive::write_data_header(azio_stream *file_to_write)
{
uchar data_buffer[DATA_BUFFER_SIZE];
DBUG_ENTER("ha_archive::write_data_header");
data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
data_buffer[1]= (uchar)ARCHIVE_VERSION;
/* Everything below this is just legacy to version 2< */
if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
DATA_BUFFER_SIZE)
goto error;
DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1]));
DBUG_PRINT("ha_archive", ("Reading legacy data header"));
DBUG_RETURN(0);
error:
DBUG_RETURN(errno);
}
ret= azread(file_to_read, data_buffer, DATA_BUFFER_SIZE, &error);
/*
This method reads the header of a meta file and returns whether or not it was successful.
*rows will contain the current number of rows in the data file upon success.
*/
int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment,
ulonglong *forced_flushes,
char *real_path)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
ulonglong check_point;
DBUG_ENTER("ha_archive::read_meta_file");
if (ret != DATA_BUFFER_SIZE)
{
DBUG_PRINT("ha_archive", ("Reading, expected %d got %lu",
DATA_BUFFER_SIZE, ret));
DBUG_RETURN(1);
}
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
DBUG_RETURN(-1);
if (error)
{
DBUG_PRINT("ha_archive", ("Compression error (%d)", error));
DBUG_RETURN(1);
}
/*
Parse out the meta data, we ignore version at the moment
*/
DBUG_PRINT("ha_archive", ("Check %u", data_buffer[0]));
DBUG_PRINT("ha_archive", ("Version %u", data_buffer[1]));
ptr+= sizeof(uchar)*2; // Move past header
*rows= (ha_rows)uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past rows
check_point= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past check_point
*auto_increment= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past auto_increment
*forced_flushes= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past forced_flush
memmove(real_path, ptr, FN_REFLEN);
ptr+= FN_REFLEN; // Move past the possible location of the file
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %lu",
(ulong) *auto_increment));
DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %lu",
(ulong) *forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
((bool)(*ptr)== TRUE))
if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) &&
(data_buffer[1] != (uchar)ARCHIVE_VERSION))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
my_sync(meta_file, MYF(MY_WME));
DBUG_RETURN(0);
}
/*
  Write out the header of a meta (.ARM) file.

  Layout (matches read_meta_file): 1 byte check header, 1 byte version,
  8 bytes row count, 8 bytes check point (reserved), 8 bytes
  auto_increment, 8 bytes forced_flushes, FN_REFLEN bytes real_path,
  1 byte dirty flag.

  By setting dirty you say whether or not the file represents the actual
  state of the data file.  Upon ::open() we set to dirty, and upon
  ::close() we set to clean.

  Returns 0 on success, -1 if the buffer could not be fully written.
*/
int ha_archive::write_meta_file(File meta_file, ha_rows rows,
                                ulonglong auto_increment,
                                ulonglong forced_flushes,
                                char *real_path,
                                bool dirty)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  uchar *ptr= meta_buffer;
  ulonglong check_point= 0; //Reserved for the future

  DBUG_ENTER("ha_archive::write_meta_file");

  *ptr= (uchar)ARCHIVE_CHECK_HEADER;
  ptr += sizeof(uchar);
  *ptr= (uchar)ARCHIVE_VERSION;
  ptr += sizeof(uchar);
  int8store(ptr, (ulonglong)rows);
  ptr += sizeof(ulonglong);
  int8store(ptr, check_point);
  ptr += sizeof(ulonglong);
  int8store(ptr, auto_increment);
  ptr += sizeof(ulonglong);
  int8store(ptr, forced_flushes);
  ptr += sizeof(ulonglong);
  // No matter what, we pad with nulls
  if (real_path)
  {
    strncpy((char *)ptr, real_path, FN_REFLEN);
    /*
      BUG FIX: strncpy() does not NUL-terminate when
      strlen(real_path) >= FN_REFLEN, and the reader side copies this
      field out and prints it with %s.  Force termination so the stored
      path is always a valid C string.
    */
    ptr[FN_REFLEN - 1]= 0;
  }
  else
    bzero(ptr, FN_REFLEN);
  ptr += FN_REFLEN;
  *ptr= (uchar)dirty;
  DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
                                             (uint)ARCHIVE_CHECK_HEADER));
  DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
                                             (uint)ARCHIVE_VERSION));
  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong) rows));
  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
  DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %lu",
                                             (ulong) auto_increment));
  DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %lu",
                                             (ulong) forced_flushes));
  DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s",
                                             real_path));
  DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));

  /* Rewind and write the whole header in one shot, then sync to disk. */
  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
    DBUG_RETURN(-1);

  my_sync(meta_file, MYF(MY_WME));

  DBUG_RETURN(0);
}
......@@ -403,9 +292,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
TABLE *table, int *rc)
{
ARCHIVE_SHARE *share;
char meta_file_name[FN_REFLEN];
uint length;
char *tmp_name;
DBUG_ENTER("ha_archive::get_share");
pthread_mutex_lock(&archive_mutex);
......@@ -415,6 +302,9 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
(byte*) table_name,
length)))
{
char *tmp_name;
char tmp_file_name[FN_REFLEN];
azio_stream archive_tmp;
if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
&share, sizeof(*share),
&tmp_name, length+1,
......@@ -432,38 +322,34 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
share->archive_write_open= FALSE;
fn_format(share->data_file_name, table_name, "",
ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(meta_file_name, table_name, "", ARM,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
strmov(share->table_name,table_name);
/*
We will use this lock for rows.
*/
VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
share->crashed= TRUE;
DBUG_PRINT("info", ("archive opening (1) up write at %s",
DBUG_PRINT("ha_archive", ("archive opening (1) up write at %s",
share->data_file_name));
/*
We read the meta file, but do not mark it dirty unless we actually do
a write.
We read the meta file, but do not mark it dirty. Since we are not
doing a write we won't mark it dirty (and we won't open it for
anything but reading... open it for write and we will generate null
compression writes).
*/
if (read_meta_file(share->meta_file, &share->rows_recorded,
&share->auto_increment_value,
&share->forced_flushes,
share->real_path))
share->crashed= TRUE;
/*
Since we now possibly no real_path, we will use it instead if it exists.
*/
if (*share->real_path)
fn_format(share->data_file_name, table_name, share->real_path, ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY)))
{
DBUG_RETURN(NULL);
}
stats.auto_increment_value= archive_tmp.auto_increment;
share->rows_recorded= archive_tmp.rows;
share->crashed= archive_tmp.dirty;
azclose(&archive_tmp);
VOID(my_hash_insert(&archive_open_tables, (byte*) share));
thr_lock_init(&share->lock);
}
share->use_count++;
DBUG_PRINT("info", ("archive table %.*s has %d open handles now",
DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles now",
share->table_name_length, share->table_name,
share->use_count));
if (share->crashed)
......@@ -478,20 +364,20 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
Free the share.
See ha_example.cc for a description.
*/
int ha_archive::free_share(ARCHIVE_SHARE *share)
int ha_archive::free_share(ARCHIVE_SHARE *share_to_free)
{
int rc= 0;
DBUG_ENTER("ha_archive::free_share");
DBUG_PRINT("info", ("archive table %.*s has %d open handles on entrance",
share->table_name_length, share->table_name,
share->use_count));
DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles on entrance",
share_to_free->table_name_length, share_to_free->table_name,
share_to_free->use_count));
pthread_mutex_lock(&archive_mutex);
if (!--share->use_count)
if (!--share_to_free->use_count)
{
hash_delete(&archive_open_tables, (byte*) share);
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
hash_delete(&archive_open_tables, (byte*) share_to_free);
thr_lock_delete(&share_to_free->lock);
VOID(pthread_mutex_destroy(&share_to_free->mutex));
/*
We need to make sure we don't reset the crashed state.
If we open a crashed file, wee need to close it as crashed unless
......@@ -499,18 +385,12 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
Since we will close the data down after this, we go on and count
the flush on close;
*/
share->forced_flushes++;
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value,
share->forced_flushes,
share->real_path,
share->crashed ? TRUE :FALSE);
if (share->archive_write_open)
if (azclose(&(share->archive_write)))
rc= 1;
if (my_close(share->meta_file, MYF(0)))
if (share_to_free->archive_write_open)
{
if (azclose(&(share_to_free->archive_write)))
rc= 1;
my_free((gptr) share, MYF(0));
}
my_free((gptr) share_to_free, MYF(0));
}
pthread_mutex_unlock(&archive_mutex);
......@@ -520,21 +400,15 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
int ha_archive::init_archive_writer()
{
DBUG_ENTER("ha_archive::init_archive_writer");
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->auto_increment_value,
share->forced_flushes,
share->real_path,
TRUE);
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
that is shared amoung all open tables.
*/
if (!(azopen(&(share->archive_write), share->data_file_name,
O_WRONLY|O_APPEND|O_BINARY)))
O_RDWR|O_BINARY)))
{
DBUG_PRINT("info", ("Could not open archive write file"));
DBUG_PRINT("ha_archive", ("Could not open archive write file"));
share->crashed= TRUE;
DBUG_RETURN(1);
}
......@@ -549,7 +423,6 @@ int ha_archive::init_archive_writer()
*/
static const char *ha_archive_exts[] = {
ARZ,
ARM,
NullS
};
......@@ -570,7 +443,7 @@ int ha_archive::open(const char *name, int mode, uint open_options)
int rc= 0;
DBUG_ENTER("ha_archive::open");
DBUG_PRINT("info", ("archive table was opened for crash: %s",
DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s",
(open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
share= get_share(name, table, &rc);
......@@ -584,9 +457,21 @@ int ha_archive::open(const char *name, int mode, uint open_options)
DBUG_RETURN(rc);
}
thr_lock_data_init(&share->lock,&lock,NULL);
DBUG_ASSERT(share);
record_buffer= create_record_buffer(table->s->reclength +
ARCHIVE_ROW_HEADER_SIZE);
DBUG_PRINT("info", ("archive data_file_name %s", share->data_file_name));
if (!record_buffer)
{
free_share(share);
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
thr_lock_data_init(&share->lock, &lock, NULL);
DBUG_PRINT("ha_archive", ("archive data_file_name %s", share->data_file_name));
if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
{
if (errno == EROFS || errno == EACCES)
......@@ -594,7 +479,7 @@ int ha_archive::open(const char *name, int mode, uint open_options)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
DBUG_PRINT("info", ("archive table was crashed %s",
DBUG_PRINT("ha_archive", ("archive table was crashed %s",
rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
{
......@@ -627,6 +512,8 @@ int ha_archive::close(void)
int rc= 0;
DBUG_ENTER("ha_archive::close");
destroy_record_buffer(record_buffer);
/* First close stream */
if (azclose(&archive))
rc= 1;
......@@ -649,9 +536,11 @@ int ha_archive::close(void)
int ha_archive::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
File create_file; // We use to create the datafile and the metafile
char name_buff[FN_REFLEN];
char linkname[FN_REFLEN];
int error;
azio_stream create_stream; /* Archive file we are working with */
DBUG_ENTER("ha_archive::create");
stats.auto_increment_value= (create_info->auto_increment_value ?
......@@ -671,71 +560,45 @@ int ha_archive::create(const char *name, TABLE *table_arg,
if (!(field->flags & AUTO_INCREMENT_FLAG))
{
error= -1;
DBUG_PRINT("info", ("Index error in creating archive table"));
DBUG_PRINT("ha_archive", ("Index error in creating archive table"));
goto error;
}
}
}
if ((create_file= my_create(fn_format(name_buff,name,"",ARM,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{
error= my_errno;
goto error;
}
/*
We reuse name_buff since it is available.
*/
write_meta_file(create_file, 0, stats.auto_increment_value, 0,
(create_info->data_file_name &&
dirname_part(name_buff, (char*)create_info->data_file_name))
? name_buff : 0, FALSE);
my_close(create_file,MYF(0));
/*
We reuse name_buff since it is available.
*/
if (create_info->data_file_name)
{
char linkname[FN_REFLEN];
DBUG_PRINT("info", ("archive will create stream file %s",
DBUG_PRINT("ha_archive", ("archive will create stream file %s",
create_info->data_file_name));
fn_format(name_buff, create_info->data_file_name, "", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(linkname, name, "", ARZ,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
if ((create_file= my_create_with_symlink(linkname, name_buff, 0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{
error= my_errno;
goto error;
}
}
else
{
if ((create_file= my_create(fn_format(name_buff, name,"", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
{
error= my_errno;
goto error;
}
fn_format(name_buff, name,"", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
linkname[0]= 0;
}
if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
if (!(azopen(&create_stream, linkname[0] ? linkname : name_buff, O_CREAT|O_RDWR|O_BINARY)))
{
error= errno;
goto error2;
}
if (write_data_header(&archive))
{
error= errno;
goto error3;
}
if (azclose(&archive))
/*
Yes you need to do this, because the starting value
for the autoincrement may not be zero.
*/
create_stream.auto_increment= stats.auto_increment_value;
if (azclose(&create_stream))
{
error= errno;
goto error2;
......@@ -743,11 +606,8 @@ int ha_archive::create(const char *name, TABLE *table_arg,
DBUG_RETURN(0);
error3:
/* We already have an error, so ignore results of azclose. */
(void)azclose(&archive);
error2:
my_close(create_file, MYF(0));
azclose(&create_stream);
delete_table(name);
error:
/* Return error number, if we got one */
......@@ -761,36 +621,79 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
{
my_off_t written;
uint *ptr, *end;
unsigned int r_pack_length;
DBUG_ENTER("ha_archive::real_write_row");
written= azwrite(writer, buf, table->s->reclength);
DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %lu",
(int) written, table->s->reclength));
/* We pack the row for writing */
r_pack_length= pack_row(buf);
written= azwrite(writer, record_buffer->buffer, r_pack_length);
if (written != r_pack_length)
{
DBUG_PRINT("ha_archive", ("Wrote %d bytes expected %d",
(uint32) written,
(uint32)r_pack_length));
DBUG_RETURN(-1);
}
if (!delayed_insert || !bulk_insert)
share->dirty= TRUE;
if (written != (my_off_t)table->s->reclength)
DBUG_RETURN(errno ? errno : -1);
/*
We should probably mark the table as damagaged if the record is written
but the blob fails.
*/
for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
DBUG_RETURN(0);
}
/*
Calculate max length needed for row. This includes
the bytes required for the length in the header.
*/
uint32 ha_archive::max_row_length(const byte *buf)
{
ulonglong length= table->s->reclength + table->s->fields*2;
length+= ARCHIVE_ROW_HEADER_SIZE;
uint *ptr, *end;
for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
ptr != end ;
ptr++)
{
char *data_ptr;
uint32 size= ((Field_blob*) table->field[*ptr])->get_length();
length += 2 + ((Field_blob*) table->field[*ptr])->get_length();
}
if (size)
return length;
}
unsigned int ha_archive::pack_row(byte *record)
{
byte *ptr;
ulonglong full_length;
DBUG_ENTER("ha_archive::pack_row");
if (fix_rec_buff(max_row_length(record)))
DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
/* Copy null bits */
memcpy(record_buffer->buffer+ARCHIVE_ROW_HEADER_SIZE,
record, table->s->null_bytes);
ptr= record_buffer->buffer + table->s->null_bytes + ARCHIVE_ROW_HEADER_SIZE;
for (Field **field=table->field ; *field ; field++)
{
((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
written= azwrite(writer, data_ptr, (unsigned)size);
if (written != (my_off_t)size)
DBUG_RETURN(errno ? errno : -1);
ptr=(byte*) (*field)->pack((char*) ptr,
(char*) record + (*field)->offset(record));
}
}
DBUG_RETURN(0);
int4store(record_buffer->buffer, (int)(ptr - record_buffer->buffer -
ARCHIVE_ROW_HEADER_SIZE));
DBUG_PRINT("ha_archive",("Pack row length %u", (unsigned int)
(ptr - record_buffer->buffer -
ARCHIVE_ROW_HEADER_SIZE)));
DBUG_RETURN((unsigned int) (ptr - record_buffer->buffer));
}
......@@ -807,6 +710,7 @@ int ha_archive::write_row(byte *buf)
{
int rc;
byte *read_buf= NULL;
byte *ptr;
ulonglong temp_auto;
byte *record= table->record[0];
DBUG_ENTER("ha_archive::write_row");
......@@ -814,31 +718,38 @@ int ha_archive::write_row(byte *buf)
if (share->crashed)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
if (!share->archive_write_open)
if (init_archive_writer())
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
pthread_mutex_lock(&share->mutex);
if (table->next_number_field)
if (table->next_number_field && record == table->record[0])
{
KEY *mkey= &table->s->key_info[0]; // We only support one key right now
update_auto_increment();
temp_auto= table->next_number_field->val_int();
/*
Bad news, this will cause a search for the unique value which is very
expensive since we will have to do a table scan which will lock up
all other writers during this period. This could perhaps be optimized
in the future.
Simple optimization to see if we fail for duplicate key immediatly
because we have just given out this value.
*/
if (temp_auto == share->auto_increment_value &&
if (temp_auto == share->archive_write.auto_increment &&
mkey->flags & HA_NOSAME)
{
rc= HA_ERR_FOUND_DUPP_KEY;
goto error;
}
if (temp_auto < share->auto_increment_value &&
/*
Bad news, this will cause a search for the unique value which is very
expensive since we will have to do a table scan which will lock up
all other writers during this period. This could perhaps be optimized
in the future.
*/
if (temp_auto < share->archive_write.auto_increment &&
mkey->flags & HA_NOSAME)
{
/*
......@@ -855,7 +766,6 @@ int ha_archive::write_row(byte *buf)
data
*/
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/*
Set the position of the local read thread to the beginning postion.
*/
......@@ -865,12 +775,6 @@ int ha_archive::write_row(byte *buf)
goto error;
}
/*
Now we read and check all of the rows.
if (!memcmp(table->next_number_field->ptr, mfield->ptr, mfield->max_length()))
if ((longlong)temp_auto ==
mfield->val_int((char*)(read_buf + mfield->offset())))
*/
Field *mfield= table->next_number_field;
while (!(get_row(&archive, read_buf)))
......@@ -886,8 +790,9 @@ int ha_archive::write_row(byte *buf)
}
else
{
if (temp_auto > share->auto_increment_value)
stats.auto_increment_value= share->auto_increment_value= temp_auto;
if (temp_auto > share->archive_write.auto_increment)
stats.auto_increment_value= share->archive_write.auto_increment=
temp_auto;
}
}
......@@ -895,10 +800,6 @@ int ha_archive::write_row(byte *buf)
Notice that the global auto_increment has been increased.
In case of a failed row write, we will never try to reuse the value.
*/
if (!share->archive_write_open)
if (init_archive_writer())
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
share->rows_recorded++;
rc= real_write_row(buf, &(share->archive_write));
error:
......@@ -916,7 +817,7 @@ void ha_archive::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong *nb_reserved_values)
{
*nb_reserved_values= 1;
*first_value= share->auto_increment_value + 1;
*first_value= share->archive_write.auto_increment + 1;
}
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
......@@ -961,7 +862,6 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
*/
pthread_mutex_lock(&share->mutex);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
pthread_mutex_unlock(&share->mutex);
/*
......@@ -1025,7 +925,8 @@ int ha_archive::rnd_init(bool scan)
if (scan)
{
scan_rows= share->rows_recorded;
DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));
DBUG_PRINT("info", ("archive will retrieve %llu rows",
(unsigned long long) scan_rows));
stats.records= 0;
/*
......@@ -1037,9 +938,8 @@ int ha_archive::rnd_init(bool scan)
pthread_mutex_lock(&share->mutex);
if (share->dirty == TRUE)
{
DBUG_PRINT("info", ("archive flushing out rows for scan"));
DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
......@@ -1059,24 +959,125 @@ int ha_archive::rnd_init(bool scan)
*/
/*
  Dispatch a single-row read to the reader that matches the on-disk
  format version of the stream.  Returns whatever the version-specific
  reader returns (0, HA_ERR_END_OF_FILE, HA_ERR_CRASHED_ON_USAGE, ...).
*/
int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
{
  DBUG_ENTER("ha_archive::get_row");
  DBUG_PRINT("ha_archive", ("Picking version for get_row() %d -> %d",
                            (uchar)file_to_read->version,
                            ARCHIVE_VERSION));

  /* Current-version files carry packed rows; older files do not. */
  int rc= (file_to_read->version == ARCHIVE_VERSION)
            ? get_row_version3(file_to_read, buf)
            : get_row_version2(file_to_read, buf);

  DBUG_PRINT("ha_archive", ("Return %d\n", rc));

  DBUG_RETURN(rc);
}
/*
  Grow the shared record buffer so it can hold at least length bytes.

  Returns 0 (false) on success; 1 (true) if the reallocation failed, in
  which case the previous buffer and length are left untouched.
*/
bool ha_archive::fix_rec_buff(unsigned int length)
{
  DBUG_ENTER("ha_archive::fix_rec_buff");
  DBUG_PRINT("ha_archive", ("Fixing %u for %u",
                            length, record_buffer->length));
  DBUG_ASSERT(record_buffer->buffer);

  /*
    BUG FIX: the original had a stray ';' after this condition
    ("if (length > record_buffer->length);"), which made the guard a
    no-op so the realloc block executed unconditionally on every call.
    Only reallocate when the current buffer is too small.
  */
  if (length > record_buffer->length)
  {
    byte *newptr;
    if (!(newptr= (byte*) my_realloc((gptr) record_buffer->buffer,
                                     length,
                                     MYF(MY_ALLOW_ZERO_PTR))))
      DBUG_RETURN(1);                 /* old buffer is still valid */
    record_buffer->buffer= newptr;
    record_buffer->length= length;
  }

  DBUG_ASSERT(length <= record_buffer->length);

  DBUG_RETURN(0);
}
/*
  Read one packed row from file_to_read and unpack it into record.

  Returns 0 on success, HA_ERR_END_OF_FILE at end of stream,
  HA_ERR_CRASHED_ON_USAGE on a corrupt row header,
  HA_ERR_OUT_OF_MEM if the record buffer cannot be grown, and
  -1 on a short or failed body read.
*/
int ha_archive::unpack_row(azio_stream *file_to_read, char *record)
{
  DBUG_ENTER("ha_archive::unpack_row");

  unsigned int read;
  int error;
  byte size_buffer[ARCHIVE_ROW_HEADER_SIZE];
  unsigned int row_len;

  /* First we grab the length stored */
  read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error);

  /* A partial header (when anything at all was read) means corruption. */
  if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /* If we read nothing we are at the end of the file */
  if (read == 0 || read != ARCHIVE_ROW_HEADER_SIZE)
    DBUG_RETURN(HA_ERR_END_OF_FILE);

  row_len= uint4korr(size_buffer);
  DBUG_PRINT("ha_archive",("Unpack row length %u -> %u", row_len,
                           (unsigned int)table->s->reclength));

  /*
    BUG FIX: the return value of fix_rec_buff() was ignored.  On a
    failed reallocation we would proceed to azread() row_len bytes into
    a buffer that may be smaller than row_len.  Fail cleanly instead.
    (Also dropped the debug-build assert that fired on short reads
    before the graceful error path below could run.)
  */
  if (fix_rec_buff(row_len))
    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  DBUG_ASSERT(row_len <= record_buffer->length);

  read= azread(file_to_read, record_buffer->buffer, row_len, &error);

  if (read != row_len || error)
    DBUG_RETURN(-1);

  /* Copy null bits */
  const char *ptr= (const char*) record_buffer->buffer;
  memcpy(record, ptr, table->s->null_bytes);
  ptr+= table->s->null_bytes;
  /* Unpack each field in declaration order, advancing through the buffer. */
  for (Field **field=table->field ; *field ; field++)
    ptr= (*field)->unpack(record + (*field)->offset(table->record[0]), ptr);
  DBUG_RETURN(0);
}
/*
  Format-3 reader: rows are stored packed, so reading one row is exactly
  one unpack_row() call.  Propagates unpack_row()'s return code.
*/
int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf)
{
  DBUG_ENTER("ha_archive::get_row_version3");

  DBUG_RETURN(unpack_row(file_to_read, buf));
}
int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf)
{
unsigned int read;
int error;
uint *ptr, *end;
char *last;
size_t total_blob_length= 0;
MY_BITMAP *read_set= table->read_set;
DBUG_ENTER("ha_archive::get_row");
DBUG_ENTER("ha_archive::get_row_version2");
read= azread(file_to_read, buf, table->s->reclength);
DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", (int) read,
table->s->reclength));
if (read == Z_STREAM_ERROR)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
read= azread(file_to_read, buf, table->s->reclength, &error);
/* If we read nothing we are at the end of the file */
if (read == 0)
DBUG_RETURN(HA_ERR_END_OF_FILE);
if (read != table->s->reclength)
{
DBUG_PRINT("ha_archive::get_row_version2", ("Read %u bytes expected %u",
read,
(unsigned int)table->s->reclength));
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
if (error == Z_STREAM_ERROR || error == Z_DATA_ERROR )
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/*
If the record is the wrong size, the file is probably damaged, unless
we are dealing with a delayed insert or a bulk insert.
......@@ -1109,7 +1110,11 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
if (bitmap_is_set(read_set,
((Field_blob*) table->field[*ptr])->field_index))
{
read= azread(file_to_read, last, size);
read= azread(file_to_read, last, size, &error);
if (error)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
if ((size_t) read != size)
DBUG_RETURN(HA_ERR_END_OF_FILE);
((Field_blob*) table->field[*ptr])->set_ptr(size, last);
......@@ -1215,39 +1220,27 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
char writer_filename[FN_REFLEN];
/* Open up the writer if we haven't yet */
if (!share->archive_write_open)
init_archive_writer();
/* Flush any waiting data */
if (share->archive_write_open)
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/* Lets create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if (!(azopen(&writer, writer_filename, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY)))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/*
An extended rebuild is a lot more effort. We open up each row and re-record it.
Any dead rows are removed (aka rows that may have been partially recorded).
*/
if (check_opt->flags == T_EXTEND)
{
DBUG_PRINT("info", ("archive extended rebuild"));
byte *buf;
/*
First we create a buffer that we can use for reading rows, and can pass
to get_row().
As of Archive format 3, this is the only type that is performed, before this
version it was just done on T_EXTEND
*/
if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
if (1)
{
rc= HA_ERR_OUT_OF_MEM;
goto error;
}
DBUG_PRINT("ha_archive", ("archive extended rebuild"));
/*
Now we will rewind the archive file so that we are positioned at the
......@@ -1255,13 +1248,6 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
*/
rc= read_data_header(&archive);
/*
Assuming now error from rewinding the archive file, we now write out the
new header for out data file.
*/
if (!rc)
rc= write_data_header(&writer);
/*
On success of writing out the new header, we now fetch each row and
insert it into the new archive file.
......@@ -1269,77 +1255,57 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
if (!rc)
{
share->rows_recorded= 0;
stats.auto_increment_value= share->auto_increment_value= 0;
while (!(rc= get_row(&archive, buf)))
stats.auto_increment_value= share->archive_write.auto_increment= 0;
my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
while (!(rc= get_row(&archive, table->record[0])))
{
real_write_row(buf, &writer);
real_write_row(table->record[0], &writer);
/*
Long term it should be possible to optimize this so that
it is not called on each row.
*/
if (table->found_next_number_field)
{
Field *field= table->found_next_number_field;
ulonglong auto_value=
(ulonglong) field->val_int((char*)(buf +
(ulonglong) field->val_int((char*)(table->record[0] +
field->offset(table->record[0])));
if (share->auto_increment_value < auto_value)
stats.auto_increment_value= share->auto_increment_value=
if (share->archive_write.auto_increment < auto_value)
stats.auto_increment_value= share->archive_write.auto_increment=
auto_value;
}
share->rows_recorded++;
}
dbug_tmp_restore_column_map(table->read_set, org_bitmap);
share->rows_recorded= writer.rows;
}
DBUG_PRINT("info", ("recovered %lu archive rows", (ulong) share->rows_recorded));
my_free((char*)buf, MYF(0));
DBUG_PRINT("info", ("recovered %llu archive rows",
(unsigned long long)share->rows_recorded));
DBUG_PRINT("ha_archive", ("recovered %llu archive rows",
(unsigned long long)share->rows_recorded));
if (rc && rc != HA_ERR_END_OF_FILE)
goto error;
}
else
{
DBUG_PRINT("info", ("archive quick rebuild"));
/*
The quick method is to just read the data raw, and then compress it directly.
*/
int read; // Bytes read, azread() returns int
char block[IO_SIZE];
if (azrewind(&archive) == -1)
{
rc= HA_ERR_CRASHED_ON_USAGE;
DBUG_PRINT("info", ("archive HA_ERR_CRASHED_ON_USAGE"));
goto error;
}
while ((read= azread(&archive, block, IO_SIZE)) > 0)
azwrite(&writer, block, read);
}
azclose(&writer);
share->dirty= FALSE;
share->forced_flushes= 0;
// now we close both our writer and our reader for the rename
azclose(&(share->archive_write));
share->archive_write_open= 0;
azclose(&archive);
// make the file we just wrote be our data file
rc = my_rename(writer_filename,share->data_file_name,MYF(0));
/*
now open the shared writer back up
we don't check rc here because we want to open the file back up even
if the optimize failed but we will return rc below so that we will
know it failed.
We also need to reopen our read descriptor since it has changed.
*/
DBUG_PRINT("info", ("Reopening archive data file"));
if (!azopen(&(share->archive_write), share->data_file_name,
O_WRONLY|O_APPEND|O_BINARY) ||
!azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))
{
DBUG_PRINT("info", ("Could not open archive write file"));
rc= HA_ERR_CRASHED_ON_USAGE;
}
DBUG_RETURN(rc);
error:
DBUG_PRINT("ha_archive", ("Failed to recover, error was %d", rc));
azclose(&writer);
DBUG_RETURN(rc);
......@@ -1397,8 +1363,10 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
{
create_info->auto_increment_value= stats.auto_increment_value;
}
if (*share->real_path)
#ifdef DISABLED
if (share->real_path)
create_info->data_file_name= share->real_path;
#endif
}
......@@ -1431,7 +1399,10 @@ int ha_archive::info(uint flag)
stats.index_file_length=0;
if (flag & HA_STATUS_AUTO)
stats.auto_increment_value= share->auto_increment_value;
{
azflush(&archive, Z_SYNC_FLUSH);
stats.auto_increment_value= archive.auto_increment;
}
DBUG_RETURN(0);
}
......@@ -1499,7 +1470,6 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
old_proc_info= thd_proc_info(thd, "Checking table");
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->forced_flushes++;
/*
First we create a buffer that we can use for reading rows, and can pass
......@@ -1547,6 +1517,36 @@ bool ha_archive::check_and_repair(THD *thd)
DBUG_RETURN(repair(thd, &check_opt));
}
/*
  Allocate an archive_record_buffer descriptor plus a data buffer of the
  given length.

  Returns the new buffer, or NULL if either allocation fails (in which
  case nothing is leaked).
*/
archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)
{
  DBUG_ENTER("ha_archive::create_record_buffer");
  archive_record_buffer *r;
  if (!(r=
        (archive_record_buffer*) my_malloc(sizeof(archive_record_buffer),
                                           MYF(MY_WME))))
  {
    DBUG_RETURN(NULL); /* purecov: inspected */
  }
  /*
    BUG FIX: length was cast through (int) before being stored into the
    uint32 length field; the round-trip is pointless and mangles values
    above INT_MAX.  Store it unmodified.
  */
  r->length= length;

  if (!(r->buffer= (byte*) my_malloc(r->length,
                                     MYF(MY_WME))))
  {
    /* Don't leak the descriptor when the data buffer allocation fails. */
    my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR));
    DBUG_RETURN(NULL); /* purecov: inspected */
  }

  DBUG_RETURN(r);
}
/*
  Release a buffer created by create_record_buffer().

  The data buffer must be freed before the descriptor that owns it;
  MY_ALLOW_ZERO_PTR makes a NULL data buffer harmless.  Note: r itself
  must not be NULL -- r->buffer is dereferenced unconditionally.
*/
void ha_archive::destroy_record_buffer(archive_record_buffer *r)
{
  DBUG_ENTER("ha_archive::destroy_record_buffer");
  my_free((char*) r->buffer, MYF(MY_ALLOW_ZERO_PTR));
  my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR));
  DBUG_VOID_RETURN;
}
struct st_mysql_storage_engine archive_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
......@@ -1560,7 +1560,7 @@ mysql_declare_plugin(archive)
PLUGIN_LICENSE_GPL,
archive_db_init, /* Plugin Init */
archive_db_done, /* Plugin Deinit */
0x0100 /* 1.0 */,
0x0300 /* 1.0 */,
NULL, /* status variables */
NULL, /* system variables */
NULL /* config options */
......
......@@ -26,34 +26,39 @@
ha_example.h.
*/
/*
  Per-handler scratch buffer used by pack_row()/unpack_row(); created in
  ::open() via create_record_buffer() and grown on demand by
  fix_rec_buff().
*/
typedef struct st_archive_record_buffer {
  byte *buffer;   /* heap-allocated scratch space (my_malloc/my_realloc) */
  uint32 length;  /* current allocated size of buffer, in bytes */
} archive_record_buffer;
/*
  State shared by every open handler on one ARCHIVE table; looked up by
  table name in archive_open_tables and reference-counted via use_count.

  NOTE(review): this view appears to be mid-merge -- several fields
  (meta_file, auto_increment_value, forced_flushes) belong to the old
  .ARM-meta-file design, while format 3 keeps rows/auto_increment inside
  the azio_stream header.  Confirm which set survives the merge.
*/
typedef struct st_archive_share {
  char *table_name;                 /* key into archive_open_tables */
  char data_file_name[FN_REFLEN];   /* path of the .ARZ data file */
  uint table_name_length,use_count; /* key length; open-handler refcount */
  pthread_mutex_t mutex;            /* serializes writers on this share */
  THR_LOCK lock;
  File meta_file;                   /* Meta file we use */
  azio_stream archive_write;        /* Archive file we are working with */
  bool archive_write_open;          /* whether archive_write is open */
  bool dirty;                       /* Flag for if a flush should occur */
  bool crashed;                     /* Meta file is crashed */
  ha_rows rows_recorded;            /* Number of rows in tables */
  ulonglong auto_increment_value;   /* presumably superseded by archive_write.auto_increment in v3 -- verify */
  ulonglong forced_flushes;         /* legacy flush counter -- verify still used */
  ulonglong mean_rec_length;
  char real_path[FN_REFLEN];        /* optional DATA DIRECTORY location */
} ARCHIVE_SHARE;
/*
  Version for file format.
  1 - Initial Version (Never Released)
  2 - Stream Compression, separate blobs, no packing
  3 - One stream (rows and blobs), with packing
*/
#define ARCHIVE_VERSION 2
#define ARCHIVE_VERSION 3
class ha_archive: public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
ARCHIVE_SHARE *share; /* Shared lock info */
azio_stream archive; /* Archive file we are working with */
my_off_t current_position; /* The position of the row we just read */
byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
......@@ -64,6 +69,10 @@ class ha_archive: public handler
const byte *current_key;
uint current_key_len;
uint current_k_offset;
archive_record_buffer *record_buffer;
archive_record_buffer *create_record_buffer(unsigned int length);
void destroy_record_buffer(archive_record_buffer *r);
public:
ha_archive(handlerton *hton, TABLE_SHARE *table_arg);
......@@ -104,21 +113,13 @@ public:
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
int get_row(azio_stream *file_to_read, byte *buf);
int read_meta_file(File meta_file, ha_rows *rows,
ulonglong *auto_increment,
ulonglong *forced_flushes,
char *real_path);
int write_meta_file(File meta_file, ha_rows rows,
ulonglong auto_increment,
ulonglong forced_flushes,
char *real_path,
bool dirty);
int get_row_version2(azio_stream *file_to_read, byte *buf);
int get_row_version3(azio_stream *file_to_read, byte *buf);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
int free_share(ARCHIVE_SHARE *share);
int init_archive_writer();
bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(azio_stream *file_to_read);
int write_data_header(azio_stream *file_to_write);
void position(const byte *record);
int info(uint);
void update_create_info(HA_CREATE_INFO *create_info);
......@@ -136,5 +137,9 @@ public:
bool is_crashed() const;
int check(THD* thd, HA_CHECK_OPT* check_opt);
bool check_and_repair(THD *thd);
uint32 max_row_length(const byte *buf);
bool fix_rec_buff(unsigned int length);
int unpack_row(azio_stream *file_to_read, char *record);
unsigned int pack_row(byte *record);
};
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment