Commit c2f5dfef authored by unknown

Merge pchardin@bk-internal.mysql.com:/home/bk/mysql-5.1-new

into  mysql.com:/home/cps/mysql/devel/5.1-repair-csv


storage/csv/ha_tina.cc:
  Auto merged
parents a78f6697 2a67aecd
@@ -4993,6 +4993,99 @@ val
2
UNLOCK TABLES;
DROP TABLE test_concurrent_insert;
CREATE TABLE test_repair_table ( val integer ) ENGINE = CSV;
CHECK TABLE test_repair_table;
Table Op Msg_type Msg_text
test.test_repair_table check status OK
REPAIR TABLE test_repair_table;
Table Op Msg_type Msg_text
test.test_repair_table repair status OK
DROP TABLE test_repair_table;
CREATE TABLE test_repair_table2 ( val integer ) ENGINE = CSV;
SELECT * from test_repair_table2;
val
Warnings:
Error 1194 Table 'test_repair_table2' is marked as crashed and should be repaired
SELECT * from test_repair_table2;
val
test_repair_table2.CSM
CHECK TABLE test_repair_table2;
Table Op Msg_type Msg_text
test.test_repair_table2 check status OK
DROP TABLE test_repair_table2;
CREATE TABLE test_repair_table3 ( val integer ) ENGINE = CSV;
CHECK TABLE test_repair_table3;
Table Op Msg_type Msg_text
test.test_repair_table3 check error Corrupt
REPAIR TABLE test_repair_table3;
Table Op Msg_type Msg_text
test.test_repair_table3 repair status OK
SELECT * FROM test_repair_table3;
val
1
4
DROP TABLE test_repair_table3;
CREATE TABLE test_repair_table4 (
num int not null,
magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
company_name char(30) DEFAULT '' NOT NULL,
founded char(4) DEFAULT '' NOT NULL
) ENGINE = CSV;
SELECT * FROM test_repair_table4;
num magic_no company_name founded
Warnings:
Error 1194 Table 'test_repair_table4' is marked as crashed and should be repaired
SELECT * FROM test_repair_table4;
num magic_no company_name founded
CHECK TABLE test_repair_table4;
Table Op Msg_type Msg_text
test.test_repair_table4 check status OK
INSERT INTO test_repair_table4 VALUES (2,101,'SAP','1972');
INSERT INTO test_repair_table4 VALUES (1,101,'Microsoft','1978');
INSERT INTO test_repair_table4 VALUES (2,101,'MySQL','1995');
SELECT * FROM test_repair_table4;
num magic_no company_name founded
2 0101 SAP 1972
1 0101 Microsoft 1978
2 0101 MySQL 1995
CHECK TABLE test_repair_table4;
Table Op Msg_type Msg_text
test.test_repair_table4 check status OK
REPAIR TABLE test_repair_table4;
Table Op Msg_type Msg_text
test.test_repair_table4 repair status OK
SELECT * FROM test_repair_table4;
num magic_no company_name founded
2 0101 SAP 1972
1 0101 Microsoft 1978
2 0101 MySQL 1995
CHECK TABLE test_repair_table4;
Table Op Msg_type Msg_text
test.test_repair_table4 check status OK
REPAIR TABLE test_repair_table4;
Table Op Msg_type Msg_text
test.test_repair_table4 repair status OK
SELECT * FROM test_repair_table4;
num magic_no company_name founded
2 0101 SAP 1972
1 0101 Microsoft 1978
2 0101 MySQL 1995
DROP TABLE test_repair_table4;
CREATE TABLE test_repair_table5 (
num int not null,
magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
company_name char(30) DEFAULT '' NOT NULL,
founded char(4) DEFAULT '' NOT NULL
) ENGINE = CSV;
CHECK TABLE test_repair_table5;
Table Op Msg_type Msg_text
test.test_repair_table5 check error Corrupt
REPAIR TABLE test_repair_table5;
Table Op Msg_type Msg_text
test.test_repair_table5 repair status OK
SELECT * FROM test_repair_table5;
num magic_no company_name founded
DROP TABLE test_repair_table5;
create table t1 (a int) engine=csv;
insert t1 values (1);
delete from t1;
@@ -1387,6 +1387,92 @@ UNLOCK TABLES;
# cleanup
DROP TABLE test_concurrent_insert;
#
# Test REPAIR/CHECK TABLE (5.1)
#
# Check that repair on the newly created table works fine
CREATE TABLE test_repair_table ( val integer ) ENGINE = CSV;
CHECK TABLE test_repair_table;
REPAIR TABLE test_repair_table;
DROP TABLE test_repair_table;
#
# Check autorepair. Here we also check that we can work w/o the meta-file
# and that it gets restored afterwards
#
CREATE TABLE test_repair_table2 ( val integer ) ENGINE = CSV;
--exec rm $MYSQLTEST_VARDIR/master-data/test/test_repair_table2.CSM
# should give a warning and perform autorepair
SELECT * from test_repair_table2;
# this should work ok, as the table is already repaired
SELECT * from test_repair_table2;
# check that the metafile appeared again. chop the path to it
--exec ls $MYSQLTEST_VARDIR/master-data/test/test_repair_table2.CSM | perl -pi -e "s/.*\///"
CHECK TABLE test_repair_table2;
DROP TABLE test_repair_table2;
# Corrupt csv file and see if we can repair it
CREATE TABLE test_repair_table3 ( val integer ) ENGINE = CSV;
--exec echo -n -e \"1\"\\n\"4\"\\n\"3 > $MYSQLTEST_VARDIR/master-data/test/test_repair_table3.CSV
CHECK TABLE test_repair_table3;
REPAIR TABLE test_repair_table3;
SELECT * FROM test_repair_table3;
DROP TABLE test_repair_table3;
# Test with a more sophisticated table
CREATE TABLE test_repair_table4 (
num int not null,
magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
company_name char(30) DEFAULT '' NOT NULL,
founded char(4) DEFAULT '' NOT NULL
) ENGINE = CSV;
--exec rm $MYSQLTEST_VARDIR/master-data/test/test_repair_table4.CSM
SELECT * FROM test_repair_table4;
SELECT * FROM test_repair_table4;
CHECK TABLE test_repair_table4;
INSERT INTO test_repair_table4 VALUES (2,101,'SAP','1972');
INSERT INTO test_repair_table4 VALUES (1,101,'Microsoft','1978');
INSERT INTO test_repair_table4 VALUES (2,101,'MySQL','1995');
# list table content
SELECT * FROM test_repair_table4;
CHECK TABLE test_repair_table4;
REPAIR TABLE test_repair_table4;
# check that nothing changed
SELECT * FROM test_repair_table4;
# verify that check/repair did not corrupt the table itself
CHECK TABLE test_repair_table4;
REPAIR TABLE test_repair_table4;
SELECT * FROM test_repair_table4;
DROP TABLE test_repair_table4;
# Run CHECK/REPAIR on the CSV file with a single row, which misses a column.
CREATE TABLE test_repair_table5 (
num int not null,
magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
company_name char(30) DEFAULT '' NOT NULL,
founded char(4) DEFAULT '' NOT NULL
) ENGINE = CSV;
# Corrupt a table -- put a file with wrong # of columns
--exec echo -n -e \"1\",\"101\",\"IBM\"\\n > $MYSQLTEST_VARDIR/master-data/test/test_repair_table5.CSV
CHECK TABLE test_repair_table5;
REPAIR TABLE test_repair_table5;
SELECT * FROM test_repair_table5;
DROP TABLE test_repair_table5;
#
# BUG#13406 - incorrect amount of "records deleted"
#
@@ -53,6 +53,24 @@
#include <mysql/plugin.h>
/*
uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
*/
#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
+ sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
#define TINA_CHECK_HEADER 254 // The number we use to determine corruption
/* The file extension */
#define CSV_EXT ".CSV" // The data file
#define CSN_EXT ".CSN" // Files used during repair
#define CSM_EXT ".CSM" // Meta file
static TINA_SHARE *get_share(const char *table_name, TABLE *table);
static int free_share(TINA_SHARE *share);
static int read_meta_file(File meta_file, ha_rows *rows);
static int write_meta_file(File meta_file, ha_rows rows, bool dirty);
/* Stuff for shares */
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
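For orientation, the meta-file layout implied by META_BUFFER_SIZE and by read_meta_file()/write_meta_file() below can be sketched as a struct. This is illustrative only (the actual code serializes field by field into a raw buffer, so struct padding never comes into play), assuming a 1-byte uchar and an 8-byte ulonglong; the reserved field names are taken from the comments in the patch:

/* Illustrative sketch of the on-disk meta-file layout; not part of the patch. */
struct tina_meta_sketch
{
  uchar     magic;           /* offset  0: TINA_CHECK_HEADER (254)               */
  uchar     version;         /* offset  1: TINA_VERSION                          */
  ulonglong rows;            /* offset  2: rows_recorded, written with int8store */
  ulonglong check_point;     /* offset 10: reserved, written as zero             */
  ulonglong auto_increment;  /* offset 18: reserved, written as zero             */
  ulonglong forced_flushes;  /* offset 26: reserved, written as zero             */
  uchar     dirty;           /* offset 34: crashed/dirty flag                    */
};                           /* 35 bytes in total == META_BUFFER_SIZE            */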
@@ -197,6 +215,7 @@ static int tina_done_func()
static TINA_SHARE *get_share(const char *table_name, TABLE *table)
{
TINA_SHARE *share;
char meta_file_name[FN_REFLEN];
char *tmp_name;
uint length;
@@ -214,7 +233,6 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
(byte*) table_name,
length)))
{
char data_file_name[FN_REFLEN];
if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
&share, sizeof(*share),
&tmp_name, length+1,
@@ -228,15 +246,39 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
share->is_log_table= FALSE;
share->table_name_length= length;
share->table_name= tmp_name;
share->crashed= FALSE;
share->rows_recorded= 0;
strmov(share->table_name, table_name);
fn_format(data_file_name, table_name, "", ".CSV",
fn_format(share->data_file_name, table_name, "", CSV_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(meta_file_name, table_name, "", CSM_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if (my_hash_insert(&tina_open_tables, (byte*) share))
goto error;
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
if ((share->data_file= my_open(data_file_name, O_RDWR|O_APPEND,
/*
Open or create the meta file. In the latter case, we'll get
an error during read_meta_file and mark the table as crashed.
Usually this will result in auto-repair, and we will get a good
meta-file in the end.
*/
if ((share->meta_file= my_open(meta_file_name,
O_RDWR|O_CREAT, MYF(0))) == -1)
share->crashed= TRUE;
/*
After the read we mark the meta-file as dirty. On close we do the
opposite. If the meta-file cannot be opened, we assume it is crashed and
mark it as such.
*/
if (read_meta_file(share->meta_file, &share->rows_recorded))
share->crashed= TRUE;
else
(void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
if ((share->data_file= my_open(share->data_file_name, O_RDWR|O_APPEND,
MYF(0))) == -1)
goto error2;
@@ -272,6 +314,128 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
}
/*
Read CSV meta-file
SYNOPSIS
read_meta_file()
meta_file The meta-file filedes
rows Pointer to the variable where we store the row count.
It is read from the meta-file.
DESCRIPTION
Read the meta-file info. For now we are only interested in
the row count, the crashed bit and the magic number.
RETURN
0 - OK
non-zero - error occurred
*/
static int read_meta_file(File meta_file, ha_rows *rows)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
DBUG_ENTER("ha_tina::read_meta_file");
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0)
!= META_BUFFER_SIZE)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/*
Parse out the meta data; we ignore the version at the moment
*/
ptr+= sizeof(uchar)*2; // Move past header
*rows= (ha_rows)uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past rows
/*
Move past check_point, auto_increment and forced_flushes fields.
They are present in the format, but we do not use them yet.
*/
ptr+= 3*sizeof(ulonglong);
/* check crashed bit and magic number */
if ((meta_buffer[0] != (uchar)TINA_CHECK_HEADER) ||
((bool)(*ptr)== TRUE))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
my_sync(meta_file, MYF(MY_WME));
DBUG_RETURN(0);
}
/*
Write CSV meta-file
SYNOPSIS
write_meta_file()
meta_file The meta-file filedes
ha_rows The number of rows we have in the datafile.
dirty A flag, which marks whether we have a corrupt table
DESCRIPTION
Write meta-info to the file. Only the row count, crashed bit and
magic number matter now.
RETURN
0 - OK
non-zero - error occurred
*/
static int write_meta_file(File meta_file, ha_rows rows, bool dirty)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
DBUG_ENTER("ha_tina::write_meta_file");
*ptr= (uchar)TINA_CHECK_HEADER;
ptr+= sizeof(uchar);
*ptr= (uchar)TINA_VERSION;
ptr+= sizeof(uchar);
int8store(ptr, (ulonglong)rows);
ptr+= sizeof(ulonglong);
memset(ptr, 0, 3*sizeof(ulonglong));
/*
Skip over checkpoint, autoincrement and forced_flushes fields.
We'll need them later.
*/
ptr+= 3*sizeof(ulonglong);
*ptr= (uchar)dirty;
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0)
!= META_BUFFER_SIZE)
DBUG_RETURN(-1);
my_sync(meta_file, MYF(MY_WME));
DBUG_RETURN(0);
}
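Together, read_meta_file() and write_meta_file() give the engine a simple crash-detection handshake: get_share() reads the meta-file and immediately rewrites it with the dirty flag set, while free_share() rewrites it clean on a normal close, so an unclean shutdown leaves the flag set and the next open marks the table as crashed. A minimal sketch of that handshake, using only the two functions above (the wrapper names are made up for illustration):

/* Hypothetical wrappers sketching the dirty-flag handshake; not part of the patch. */
static int meta_open(File meta_file, ha_rows *rows, bool *crashed)
{
  /* A set dirty flag (or a short read) means the last shutdown was unclean. */
  *crashed= (read_meta_file(meta_file, rows) != 0);
  if (!*crashed)
    return write_meta_file(meta_file, *rows, TRUE);   /* mark "in use" */
  return 0;
}

static int meta_close(File meta_file, ha_rows rows, bool crashed)
{
  /* A normal close clears the dirty flag; a crashed share keeps it set. */
  return write_meta_file(meta_file, rows, crashed ? TRUE : FALSE);
}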
bool ha_tina::check_and_repair(THD *thd)
{
HA_CHECK_OPT check_opt;
DBUG_ENTER("ha_tina::check_and_repair");
check_opt.init();
DBUG_RETURN(repair(thd, &check_opt));
}
bool ha_tina::is_crashed() const
{
DBUG_ENTER("ha_tina::is_crashed");
DBUG_RETURN(share->crashed);
}
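is_crashed() and check_and_repair(), together with auto_repair() returning true in ha_tina.h, are the hooks that let the server repair a crashed CSV table transparently; that path is what the test_repair_table2 and test_repair_table4 cases above exercise. A rough, hypothetical sketch of the caller side (this is not the actual SQL-layer code, only the expected call sequence):

/* Hypothetical caller-side sketch of auto-repair; not part of the patch. */
static int open_csv_with_auto_repair(ha_tina *csv, const char *name, THD *thd)
{
  int error= csv->open(name, O_RDWR, 0);                 /* ordinary open */
  if (error == HA_ERR_CRASHED_ON_USAGE && csv->auto_repair())
  {
    /* Re-open so ha_tina::open() accepts the crashed table, then repair it. */
    if (!(error= csv->open(name, O_RDWR, HA_OPEN_FOR_REPAIR)))
      csv->check_and_repair(thd);                        /* ends up in ha_tina::repair() */
  }
  return error;
}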
/*
Free lock controls.
*/
@@ -281,7 +445,11 @@ static int free_share(TINA_SHARE *share)
pthread_mutex_lock(&tina_mutex);
int result_code= 0;
if (!--share->use_count){
/* Drop the mapped file */
/* Write the meta file. Mark it as crashed if needed. */
(void)write_meta_file(share->meta_file, share->rows_recorded,
share->crashed ? TRUE :FALSE);
if (my_close(share->meta_file, MYF(0)))
result_code= 1;
if (share->mapped_file)
my_munmap(share->mapped_file, share->file_stat.st_size);
result_code= my_close(share->data_file,MYF(0));
@@ -410,7 +578,7 @@ int ha_tina::encode_quote(byte *buf)
/*
chain_append() adds delete positions to the chain that we use to keep
track of space. Then the chain will be used to cleanup "holes", occured
track of space. Then the chain will be used to cleanup "holes", occurred
due to deletes and updates.
*/
int ha_tina::chain_append()
@@ -472,7 +640,10 @@ int ha_tina::find_current_row(byte *buf)
for (Field **field=table->field ; *field ; field++)
{
buffer.length(0);
if (*mapped_ptr == '"')
mapped_ptr++; // Increment past the first quote
else
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
for(;mapped_ptr != end_ptr; mapped_ptr++)
{
// Need to convert line feeds!
@@ -498,9 +669,17 @@ int ha_tina::find_current_row(byte *buf)
buffer.append(*mapped_ptr);
}
}
else
else // ordinary symbol
{
/*
We are at the last symbol and no closing quote was found =>
we are working with a damaged file.
*/
if (mapped_ptr == end_ptr -1)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
buffer.append(*mapped_ptr);
}
}
(*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
}
next_position= (end_ptr - share->mapped_file)+1;
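The two checks added to find_current_row() above boil down to a simple invariant on the data file: every field of a row must open with a double quote, and a closing quote must be seen before the end of the line; otherwise the row is treated as damage and HA_ERR_CRASHED_ON_USAGE is returned. A self-contained sketch of that invariant, ignoring escape sequences and independent of the mmap/Field machinery (illustrative only):

// Standalone sketch of the row-sanity rule; not part of the patch.
#include <iostream>
#include <string>

// 'row' is one line of the data file without its trailing '\n'.
static bool row_looks_sane(const std::string &row)
{
  std::string::size_type pos= 0;
  while (pos < row.size())
  {
    if (row[pos] != '"')                             // missing opening quote
      return false;
    std::string::size_type end= row.find('"', pos + 1);
    if (end == std::string::npos)                    // missing closing quote
      return false;
    pos= end + 1;
    if (pos == row.size())                           // last field closed cleanly
      return true;
    if (row[pos] != ',')                             // fields must be comma-separated
      return false;
    pos++;
  }
  return false;                                      // empty row or trailing comma
}

int main()
{
  std::cout << row_looks_sane("\"1\",\"101\",\"IBM\"") << '\n';  // 1: ok
  std::cout << row_looks_sane("\"3") << '\n';                    // 0: bad row, as in test_repair_table3
  return 0;
}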
@@ -515,7 +694,8 @@ int ha_tina::find_current_row(byte *buf)
extensions exist for this handler.
*/
static const char *ha_tina_exts[] = {
".CSV",
CSV_EXT,
CSM_EXT,
NullS
};
@@ -638,12 +818,18 @@ bool ha_tina::check_if_locking_is_allowed(uint sql_command,
this will not be called for every request. Any sort of positions
that need to be reset should be kept in the ::extra() call.
*/
int ha_tina::open(const char *name, int mode, uint test_if_locked)
int ha_tina::open(const char *name, int mode, uint open_options)
{
DBUG_ENTER("ha_tina::open");
if (!(share= get_share(name, table)))
DBUG_RETURN(1);
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR))
{
free_share(share);
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
/*
Init locking. Pass handler object to the locking routines,
@@ -681,6 +867,9 @@ int ha_tina::write_row(byte * buf)
int size;
DBUG_ENTER("ha_tina::write_row");
if (share->crashed)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
@@ -704,13 +893,13 @@ int ha_tina::write_row(byte * buf)
/* update local copy of the max position to see our own changes */
local_saved_data_file_length= share->file_stat.st_size;
/* update shared info */
pthread_mutex_lock(&share->mutex);
share->rows_recorded++;
/* update status for the log tables */
if (share->is_log_table)
{
pthread_mutex_lock(&share->mutex);
update_status();
pthread_mutex_unlock(&share->mutex);
}
records++;
DBUG_RETURN(0);
@@ -814,6 +1003,9 @@ int ha_tina::rnd_init(bool scan)
{
DBUG_ENTER("ha_tina::rnd_init");
if (share->crashed)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
current_position= next_position= 0;
records= 0;
records_is_known= 0;
@@ -843,15 +1035,19 @@ int ha_tina::rnd_init(bool scan)
*/
int ha_tina::rnd_next(byte *buf)
{
int rc;
DBUG_ENTER("ha_tina::rnd_next");
if (share->crashed)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= next_position;
if (!share->mapped_file)
DBUG_RETURN(HA_ERR_END_OF_FILE);
if (HA_ERR_END_OF_FILE == find_current_row(buf) )
DBUG_RETURN(HA_ERR_END_OF_FILE);
if ((rc= find_current_row(buf)))
DBUG_RETURN(rc);
records++;
DBUG_RETURN(0);
@@ -975,6 +1171,104 @@ int ha_tina::rnd_end()
}
/*
Repair a CSV table in case it is crashed.
SYNOPSIS
repair()
thd The thread, performing repair
check_opt The options for repair. We do not use it currently.
DESCRIPTION
If the file is empty, just reset the stored row count and complete recovery.
Otherwise, scan the table looking for bad rows. If none are found,
we mark the file as good and return. If a bad row is encountered,
we truncate the data file up to the last good row.
TODO: Make repair more clever - it should try to recover subsequent
rows (after the first bad one) as well.
*/
int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
char repaired_fname[FN_REFLEN];
byte *buf;
File repair_file;
int rc;
ha_rows rows_repaired= 0;
DBUG_ENTER("ha_tina::repair");
/* empty file */
if (!share->mapped_file)
{
share->rows_recorded= 0;
goto end;
}
if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/*
Local_saved_data_file_length is initialized during the lock phase.
Sometimes that phase is not executed before ::repair (e.g. for
the log tables), so we set the value manually here.
*/
local_saved_data_file_length= share->file_stat.st_size;
/* set current position to the beginning of the file */
current_position= next_position= 0;
/* Read the file row-by-row. If everything is ok, repair is not needed. */
while (!(rc= find_current_row(buf)))
{
rows_repaired++;
current_position= next_position;
}
my_free((char*)buf, MYF(0));
/* The file is ok */
if (rc == HA_ERR_END_OF_FILE)
{
/*
If rows_recorded != rows_repaired, we should update
rows_recorded value to the current amount of rows.
*/
share->rows_recorded= rows_repaired;
goto end;
}
/*
Otherwise we've encountered a bad row => repair is needed.
Let us create a temporary file.
*/
if ((repair_file= my_create(fn_format(repaired_fname, share->table_name,
"", CSN_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),
0, O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
if (my_write(repair_file, (byte*)share->mapped_file, current_position,
MYF(MY_NABP)))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
my_close(repair_file, MYF(0));
/* We just truncated the file up to the first bad row; update the row count. */
share->rows_recorded= rows_repaired;
if (my_munmap(share->mapped_file, share->file_stat.st_size))
DBUG_RETURN(-1);
my_rename(repaired_fname, share->data_file_name, MYF(0));
/* We set it to null so that get_mmap() won't try to unmap it */
share->mapped_file= NULL;
if (get_mmap(share, 0) > 0)
DBUG_RETURN(-1);
end:
share->crashed= FALSE;
DBUG_RETURN(HA_ADMIN_OK);
}
/*
DELETE without WHERE calls this
*/
@@ -1021,16 +1315,64 @@ int ha_tina::create(const char *name, TABLE *table_arg,
File create_file;
DBUG_ENTER("ha_tina::create");
if ((create_file= my_create(fn_format(name_buff, name, "", ".CSV",
if ((create_file= my_create(fn_format(name_buff, name, "", CSM_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME), 0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
DBUG_RETURN(-1);
write_meta_file(create_file, 0, FALSE);
my_close(create_file, MYF(0));
if ((create_file= my_create(fn_format(name_buff, name, "", CSV_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
DBUG_RETURN(-1);
my_close(create_file,MYF(0));
my_close(create_file, MYF(0));
DBUG_RETURN(0);
}
int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
{
int rc= 0;
byte *buf;
const char *old_proc_info;
ha_rows count= share->rows_recorded;
DBUG_ENTER("ha_tina::check");
old_proc_info= thd_proc_info(thd, "Checking table");
if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
/*
Local_saved_data_file_length is initialized during the lock phase.
CHECK TABLE does not use store_lock in certain cases, so we set the
value manually here.
*/
local_saved_data_file_length= share->file_stat.st_size;
/* set current position to the beginning of the file */
current_position= next_position= 0;
/* Read the file row-by-row. If everything is ok, the table is not corrupted. */
while (!(rc= find_current_row(buf)))
{
count--;
current_position= next_position;
}
my_free((char*)buf, MYF(0));
thd_proc_info(thd, old_proc_info);
if ((rc != HA_ERR_END_OF_FILE) || count)
{
share->crashed= TRUE;
DBUG_RETURN(HA_ADMIN_CORRUPT);
}
else
DBUG_RETURN(HA_ADMIN_OK);
}
bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
@@ -19,9 +19,16 @@
#include <my_dir.h>
#define DEFAULT_CHAIN_LENGTH 512
/*
Version for file format.
1 - Initial Version. That is, the version when the metafile was introduced.
*/
#define TINA_VERSION 1
typedef struct st_tina_share {
char *table_name;
char data_file_name[FN_REFLEN];
byte *mapped_file; /* mapped region of file */
uint table_name_length, use_count;
/*
@@ -39,6 +46,9 @@ typedef struct st_tina_share {
off_t saved_data_file_length;
pthread_mutex_t mutex;
THR_LOCK lock;
File meta_file; /* Meta file we use */
bool crashed; /* Meta file is crashed */
ha_rows rows_recorded; /* Number of rows in tables */
} TINA_SHARE;
typedef struct tina_set {
@@ -108,7 +118,7 @@ class ha_tina: public handler
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread);
int open(const char *name, int mode, uint test_if_locked);
int open(const char *name, int mode, uint open_options);
int close(void);
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
@@ -116,7 +126,13 @@ class ha_tina: public handler
int rnd_init(bool scan=1);
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
bool check_and_repair(THD *thd);
int check(THD* thd, HA_CHECK_OPT* check_opt);
bool is_crashed() const;
int rnd_end();
int repair(THD* thd, HA_CHECK_OPT* check_opt);
/* This is required for the SQL layer to know that we support auto-repair */
bool auto_repair() const { return 1; }
void position(const byte *record);
void info(uint);
int extra(enum ha_extra_function operation);