Commit 48f8a7da authored by unknown

ha_tina cleanup: fixed several compiler warnings, which occurred because some
methods were changed in handler, but not in ha_tina. Added some comments.
Fixed the code to fit into my editor window :)


sql/examples/ha_tina.cc:
  Cleanup: Fix comments and code to fit 80-char lines, remove trailing spaces,
           add spaces after commas in function calls.
           Also add/clarify several comments.
sql/examples/ha_tina.h:
  Fix warnings, add some comments about chain usage.
parent b4d5576a
sql/examples/ha_tina.cc
@@ -17,13 +17,17 @@
/*
Make sure to look at ha_tina.h for more details.
First off, this is a play thing for me, there are a number of things wrong with it:
*) It was designed for csv and therefor its performance is highly questionable.
*) Indexes have not been implemented. This is because the files can be traded in
and out of the table directory without having to worry about rebuilding anything.
First off, this is a play thing for me, there are a number of things
wrong with it:
*) It was designed for csv and therefore its performance is highly
questionable.
*) Indexes have not been implemented. This is because the files can
be traded in and out of the table directory without having to worry
about rebuilding anything.
*) NULLs and "" are treated equally (like a spreadsheet).
*) There was in the beginning no point to anyone seeing this other then me, so there
is a good chance that I haven't quite documented it well.
*) There was in the beginning no point to anyone seeing this other
then me, so there is a good chance that I haven't quite documented
it well.
*) Less design, more "make it work"
Now there are a few cool things with it:
@@ -173,11 +177,12 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
return NULL;
}
share->use_count=0;
share->table_name_length=length;
share->table_name=tmp_name;
strmov(share->table_name,table_name);
fn_format(data_file_name, table_name, "", ".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME);
share->use_count= 0;
share->table_name_length= length;
share->table_name= tmp_name;
strmov(share->table_name, table_name);
fn_format(data_file_name, table_name, "", ".CSV",
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if (my_hash_insert(&tina_open_tables, (byte*) share))
goto error;
thr_lock_init(&share->lock);
@@ -186,11 +191,14 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
if ((share->data_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
goto error2;
/* We only use share->data_file for writing, so we scan to the end to append */
/*
We only use share->data_file for writing, so we scan to
the end to append
*/
if (my_seek(share->data_file, 0, SEEK_END, MYF(0)) == MY_FILEPOS_ERROR)
goto error2;
share->mapped_file= NULL; // We don't know the state since we just allocated it
share->mapped_file= NULL; // We don't know the state as we just allocated it
if (get_mmap(share, 0) > 0)
goto error3;
}
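For reference, the block above keeps share->data_file positioned at end of file so that inserts can simply append, while table scans read the same bytes through the memory mapping set up by get_mmap(). A minimal standalone sketch of that pattern with plain POSIX calls; the open_for_append_and_map() helper below is hypothetical illustration only, since the engine itself goes through my_open(), my_seek() and get_mmap():

```cpp
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/*
  Hypothetical sketch: open a data file so the descriptor is left at EOF for
  appends, while scans read the same bytes through an mmap of the file.
*/
static char *open_for_append_and_map(const char *path, int *fd, size_t *size)
{
  if ((*fd = open(path, O_RDWR)) < 0)
    return nullptr;

  struct stat st;
  if (fstat(*fd, &st) < 0 ||
      lseek(*fd, 0, SEEK_END) == (off_t) -1)    /* writes will append at EOF */
  {
    close(*fd);
    return nullptr;
  }

  /* Note: mmap() of a zero-length file fails; a real caller handles that. */
  void *map = mmap(nullptr, (size_t) st.st_size,
                   PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
  if (map == MAP_FAILED)
  {
    close(*fd);
    return nullptr;
  }
  *size = (size_t) st.st_size;
  return (char *) map;
}
```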
@@ -256,7 +264,8 @@ ha_tina::ha_tina(TABLE *table_arg)
These definitions are found in hanler.h
These are not probably completely right.
*/
current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH)
current_position(0), next_position(0), chain_alloced(0),
chain_size(DEFAULT_CHAIN_LENGTH)
{
/* Set our original buffers from pre-allocated memory */
buffer.set(byte_buffer, IO_SIZE, system_charset_info);
@@ -324,7 +333,9 @@ int ha_tina::encode_quote(byte *buf)
}
/*
chain_append() adds delete positions to the chain that we use to keep track of space.
chain_append() adds delete positions to the chain that we use to keep
track of space. Then the chain will be used to cleanup "holes", occured
due to deletes and updates.
*/
int ha_tina::chain_append()
{
@@ -340,12 +351,14 @@ int ha_tina::chain_append()
if (chain_alloced)
{
/* Must cast since my_malloc unlike malloc doesn't have a void ptr */
if ((chain= (tina_set *)my_realloc((gptr)chain,chain_size,MYF(MY_WME))) == NULL)
if ((chain= (tina_set *) my_realloc((gptr)chain,
chain_size, MYF(MY_WME))) == NULL)
return -1;
}
else
{
tina_set *ptr= (tina_set *)my_malloc(chain_size * sizeof(tina_set),MYF(MY_WME));
tina_set *ptr= (tina_set *) my_malloc(chain_size * sizeof(tina_set),
MYF(MY_WME));
memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set));
chain= ptr;
chain_alloced++;
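The chain described above is essentially a growable array of (begin, end) byte ranges marking rows whose bytes are now dead space. A simplified sketch of the same idea using plain C++ containers in place of the pre-allocated chain_buffer and the my_malloc()/my_realloc() growth shown in the hunk; the names below are hypothetical:

```cpp
#include <sys/types.h>
#include <vector>

/* One "hole" in the data file: bytes [begin, end) no longer hold a live row. */
struct hole
{
  off_t begin;
  off_t end;
};

/*
  Hypothetical simplification of the chain: delete_row()/update_row() would
  call append() with the byte range of the old row, and rnd_end() would later
  walk all() to compact the file.
*/
class hole_chain
{
  std::vector<hole> holes;
public:
  void append(off_t begin, off_t end) { holes.push_back({begin, end}); }
  const std::vector<hole> &all() const { return holes; }
  void clear() { holes.clear(); }
};
```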
@@ -371,7 +384,8 @@ int ha_tina::find_current_row(byte *buf)
DBUG_ENTER("ha_tina::find_current_row");
/* EOF should be counted as new line */
if ((end_ptr= find_eoln(share->mapped_file, current_position, share->file_stat.st_size)) == 0)
if ((end_ptr= find_eoln(share->mapped_file, current_position,
share->file_stat.st_size)) == 0)
DBUG_RETURN(HA_ERR_END_OF_FILE);
for (Field **field=table->field ; *field ; field++)
@@ -380,9 +394,10 @@ int ha_tina::find_current_row(byte *buf)
mapped_ptr++; // Increment past the first quote
for(;mapped_ptr != end_ptr; mapped_ptr++)
{
//Need to convert line feeds!
// Need to convert line feeds!
if (*mapped_ptr == '"' &&
(((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) || (mapped_ptr == end_ptr -1 )))
(((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) ||
(mapped_ptr == end_ptr -1 )))
{
mapped_ptr += 2; // Move past the , and the "
break;
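The loop above scans a quoted CSV value byte by byte and stops at the closing quote, which is recognised either by the `,"` that starts the next field or by being the last byte of the row. A rough standalone restatement of that end-of-field test; the helper is hypothetical, with an explicit bounds check that the mmap-based loop gets for free from end_ptr:

```cpp
/*
  Hypothetical helper: given a pointer inside a quoted CSV value and a pointer
  just past the last byte of the row, report whether *p closes the current
  field. Mirrors the test in the hunk: '"' followed by ',"', or '"' as the
  final byte of the row.
*/
static bool ends_quoted_field(const char *p, const char *end_of_row)
{
  if (*p != '"')
    return false;
  if (p == end_of_row - 1)                /* closing quote of the last field */
    return true;
  return (p + 2 < end_of_row) && p[1] == ',' && p[2] == '"';
}
```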
@@ -521,17 +536,19 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
/*
Deletes a row. First the database will find the row, and then call this method.
In the case of a table scan, the previous call to this will be the ::rnd_next()
that found this row.
The exception to this is an ORDER BY. This will cause the table handler to walk
the table noting the positions of all rows that match a query. The table will
then be deleted/positioned based on the ORDER (so RANDOM, DESC, ASC).
Deletes a row. First the database will find the row, and then call this
method. In the case of a table scan, the previous call to this will be
the ::rnd_next() that found this row.
The exception to this is an ORDER BY. This will cause the table handler
to walk the table noting the positions of all rows that match a query.
The table will then be deleted/positioned based on the ORDER (so RANDOM,
DESC, ASC).
*/
int ha_tina::delete_row(const byte * buf)
{
DBUG_ENTER("ha_tina::delete_row");
statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
statistic_increment(table->in_use->status_var.ha_delete_count,
&LOCK_status);
if (chain_append())
DBUG_RETURN(-1);
@@ -653,22 +670,26 @@ int ha_tina::rnd_init(bool scan)
chain_ptr= chain;
#ifdef HAVE_MADVISE
if (scan)
(void)madvise(share->mapped_file,share->file_stat.st_size,MADV_SEQUENTIAL);
(void) madvise(share->mapped_file, share->file_stat.st_size,
MADV_SEQUENTIAL);
#endif
DBUG_RETURN(0);
}
/*
::rnd_next() does all the heavy lifting for a table scan. You will need to populate *buf
with the correct field data. You can walk the field to determine at what position you
should store the data (take a look at how ::find_current_row() works). The structure
is something like:
::rnd_next() does all the heavy lifting for a table scan. You will need to
populate *buf with the correct field data. You can walk the field to
determine at what position you should store the data (take a look at how
::find_current_row() works). The structure is something like:
0Foo Dog Friend
The first offset is for the first attribute. All space before that is reserved for null count.
Basically this works as a mask for which rows are nulled (compared to just empty).
This table handler doesn't do nulls and does not know the difference between NULL and "". This
is ok since this table handler is for spreadsheets and they don't know about them either :)
The first offset is for the first attribute. All space before that is
reserved for null count.
Basically this works as a mask for which rows are nulled (compared to just
empty).
This table handler doesn't do nulls and does not know the difference between
NULL and "". This is ok since this table handler is for spreadsheets and
they don't know about them either :)
*/
int ha_tina::rnd_next(byte *buf)
{
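The row buffer described in the comment above starts with space reserved for null flags, followed by the field values at positions owned by the server's Field objects; since this engine never produces NULL, the flag area stays zero. A schematic, purely illustrative packing of the "0Foo Dog Friend" example, where the 10-byte field slots are an assumption and not the real record layout:

```cpp
#include <cstring>

/*
  Schematic only: pack the "0Foo Dog Friend" example into a buffer of at
  least 31 bytes. Byte 0 is the null-flag area (all zero: nothing is NULL),
  and each value sits at a fixed, assumed offset after it. The real layout
  is owned by the Field objects attached to table->record[0].
*/
static void pack_schematic_row(char *buf)
{
  std::memset(buf, ' ', 31);
  buf[0] = 0;                          /* null "mask": no field is NULL */
  std::memcpy(buf + 1,  "Foo",    3);  /* field 1 at its fixed offset   */
  std::memcpy(buf + 11, "Dog",    3);  /* field 2                       */
  std::memcpy(buf + 21, "Friend", 6);  /* field 3                       */
}
```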
@@ -755,8 +776,10 @@ int ha_tina::reset(void)
/*
Called after deletes, inserts, and updates. This is where we clean up all of
the dead space we have collected while writing the file.
Called after each table scan. In particular after deletes,
and updates. In the last case we employ chain of deleted
slots to clean up all of the dead space we have collected while
performing deletes/updates.
*/
int ha_tina::rnd_end()
{
@@ -781,7 +804,8 @@ int ha_tina::rnd_end()
It also sorts so that we move the final blocks to the
beginning so that we move the smallest amount of data possible.
*/
qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set), (qsort_cmp)sort_set);
qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set),
(qsort_cmp)sort_set);
for (ptr= chain; ptr < chain_ptr; ptr++)
{
/* We peek a head to see if this is the last chain */
@@ -789,7 +813,8 @@ int ha_tina::rnd_end()
memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
length - (size_t)ptr->end);
else
memmove((caddr_t)share->mapped_file + ptr->begin, (caddr_t)share->mapped_file + ptr->end,
memmove((caddr_t)share->mapped_file + ptr->begin,
(caddr_t)share->mapped_file + ptr->end,
(size_t)((ptr++)->begin - ptr->end));
length= length - (size_t)(ptr->end - ptr->begin);
}
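Conceptually, the compaction above sorts the collected holes and slides the live bytes that follow each hole to the left, so the data shrinks by the total size of the holes. A simplified in-memory sketch of that algorithm; the compact() helper is hypothetical, and the real rnd_end() performs the equivalent memmove() calls directly on the mmap'ed file before truncating it:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <vector>

struct hole { std::size_t begin; std::size_t end; };  /* [begin, end) is dead */

/*
  Hypothetical, simplified compaction over an in-memory buffer: sort the
  holes, then move the live bytes that follow each hole left over it.
  Returns the new length of the live data.
*/
static std::size_t compact(char *data, std::size_t length,
                           std::vector<hole> holes)
{
  std::sort(holes.begin(), holes.end(),
            [](const hole &a, const hole &b) { return a.begin < b.begin; });

  std::size_t shift = 0;                          /* bytes removed so far */
  for (std::size_t i = 0; i < holes.size(); i++)
  {
    std::size_t live_end = (i + 1 < holes.size()) ? holes[i + 1].begin : length;
    std::memmove(data + holes[i].begin - shift,   /* new home of the live run  */
                 data + holes[i].end,             /* live bytes after the hole */
                 live_end - holes[i].end);
    shift += holes[i].end - holes[i].begin;
  }
  return length - shift;
}
```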
@@ -852,11 +877,8 @@ THR_LOCK_DATA **ha_tina::store_lock(THD *thd,
Range optimizer calls this.
I need to update the information on this.
*/
ha_rows ha_tina::records_in_range(int inx,
const byte *start_key,uint start_key_len,
enum ha_rkey_function start_search_flag,
const byte *end_key,uint end_key_len,
enum ha_rkey_function end_search_flag)
ha_rows ha_tina::records_in_range(uint inx, key_range *min_key,
key_range *max_key)
{
DBUG_ENTER("ha_tina::records_in_range ");
DBUG_RETURN(records); // Good guess
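This signature change (and the matching read_time() change further down) is the handler-API drift that produced the compiler warnings mentioned in the commit message: the six separate start/end key arguments were folded into two key_range pointers, either of which may be NULL for an open-ended range. A hedged sketch of the new shape; the stand-in key_range members below are an assumption for illustration, not the server's definition:

```cpp
/*
  Stand-in declarations so the sketch is self-contained; the real types live
  in the server headers and the key_range members shown here are assumed.
*/
typedef unsigned long long ha_rows;
struct key_range
{
  const unsigned char *key;   /* packed key image                    */
  unsigned int length;        /* length of that image                */
  int flag;                   /* comparison flag (ha_rkey_function)  */
};

/*
  Sketch of the reworked estimate: either bound may be NULL for an open-ended
  range. ha_tina has no indexes or statistics, so its real implementation
  simply returns `records` regardless of the arguments.
*/
static ha_rows sketch_records_in_range(unsigned int inx,
                                       const key_range *min_key,
                                       const key_range *max_key,
                                       ha_rows total_rows)
{
  (void) inx;
  if (!min_key && !max_key)
    return total_rows;          /* unconstrained: every row may qualify */
  return total_rows / 2;        /* crude placeholder for a real probe   */
}
```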
@@ -868,13 +890,15 @@ ha_rows ha_tina::records_in_range(int inx,
this (the database will call ::open() if it needs to).
*/
int ha_tina::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info)
int ha_tina::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
char name_buff[FN_REFLEN];
File create_file;
DBUG_ENTER("ha_tina::create");
if ((create_file= my_create(fn_format(name_buff,name,"",".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
if ((create_file= my_create(fn_format(name_buff, name, "", ".CSV",
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
DBUG_RETURN(-1);
sql/examples/ha_tina.h
@@ -43,6 +43,11 @@ class ha_tina: public handler
off_t next_position; /* Next position in the file scan */
byte byte_buffer[IO_SIZE];
String buffer;
/*
The chain contains "holes" in the file, occured because of
deletes/updates. It is used in rnd_end() to get rid of them
in the end of the query.
*/
tina_set chain_buffer[DEFAULT_CHAIN_LENGTH];
tina_set *chain;
tina_set *chain_ptr;
@@ -78,7 +83,11 @@ class ha_tina: public handler
*/
virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
/* The next method will never be called */
virtual double read_time(ha_rows rows) { DBUG_ASSERT(0); return((double) rows / 20.0+1); }
virtual double read_time(uint index, uint ranges, ha_rows rows)
{
DBUG_ASSERT(0);
return((double) rows / 20.0+1);
}
virtual bool fast_key_read() { return 1;}
/*
TODO: return actual upper bound of number of records in the table.
@@ -110,10 +119,8 @@ class ha_tina: public handler
int reset(void);
int external_lock(THD *thd, int lock_type);
int delete_all_rows(void);
ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len,
enum ha_rkey_function start_search_flag,
const byte *end_key,uint end_key_len,
enum ha_rkey_function end_search_flag);
ha_rows records_in_range(uint inx, key_range *min_key,
key_range *max_key);
// int delete_table(const char *from);
// int rename_table(const char * from, const char * to);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);