Commit 890d183f authored by Zardosht Kasheff's avatar Zardosht Kasheff Committed by Yoni Fogel

addresses #1567, BUILD

remove TOKUDB_DIRTY_DICTIONARY references, as they have been removed
from db.h, add an infinity byte to row descriptor

git-svn-id: file:///svn/mysql/tokudb-engine/src@10965 c7de825b-a66e-492c-adef-691d508d4ae1
parent 85a6ac94
...@@ -498,7 +498,6 @@ int ha_tokudb::open_secondary_table(DB** ptr, KEY* key_info, const char* name, i ...@@ -498,7 +498,6 @@ int ha_tokudb::open_secondary_table(DB** ptr, KEY* key_info, const char* name, i
int error = ENOSYS; int error = ENOSYS;
char part[MAX_ALIAS_NAME + 10]; char part[MAX_ALIAS_NAME + 10];
char name_buff[FN_REFLEN]; char name_buff[FN_REFLEN];
char error_msg[MAX_ALIAS_NAME + 50]; //50 is arbitrary upper bound of extra txt
uint open_flags = (mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD; uint open_flags = (mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD;
char* newname = NULL; char* newname = NULL;
newname = (char *)my_malloc(strlen(name) + NAME_CHAR_LEN, MYF(MY_WME)); newname = (char *)my_malloc(strlen(name) + NAME_CHAR_LEN, MYF(MY_WME));
...@@ -531,10 +530,6 @@ int ha_tokudb::open_secondary_table(DB** ptr, KEY* key_info, const char* name, i ...@@ -531,10 +530,6 @@ int ha_tokudb::open_secondary_table(DB** ptr, KEY* key_info, const char* name, i
(*ptr)->api_internal = share->file->app_private; (*ptr)->api_internal = share->file->app_private;
if ((error = (*ptr)->open(*ptr, 0, name_buff, NULL, DB_BTREE, open_flags, 0))) { if ((error = (*ptr)->open(*ptr, 0, name_buff, NULL, DB_BTREE, open_flags, 0))) {
if (error == TOKUDB_DIRTY_DICTIONARY) {
sprintf(error_msg, "File %s is dirty, not opening DB", name_buff);
sql_print_error(error_msg);
}
my_errno = error; my_errno = error;
goto cleanup; goto cleanup;
} }
...@@ -564,7 +559,6 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) { ...@@ -564,7 +559,6 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
TOKUDB_OPEN(); TOKUDB_OPEN();
char name_buff[FN_REFLEN]; char name_buff[FN_REFLEN];
char error_msg[MAX_ALIAS_NAME + 50]; //50 is arbitrary upper bound of extra txt
uint open_flags = (mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD; uint open_flags = (mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD;
uint max_key_length; uint max_key_length;
int error; int error;
...@@ -687,10 +681,6 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) { ...@@ -687,10 +681,6 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
make_name(newname, name, "main"); make_name(newname, name, "main");
fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME); fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME);
if ((error = share->file->open(share->file, 0, name_buff, NULL, DB_BTREE, open_flags, 0))) { if ((error = share->file->open(share->file, 0, name_buff, NULL, DB_BTREE, open_flags, 0))) {
if (error == TOKUDB_DIRTY_DICTIONARY) {
sprintf(error_msg, "File %s is dirty, not opening DB", name_buff);
sql_print_error(error_msg);
}
free_share(share, 1); free_share(share, 1);
my_free((char *) rec_buff, MYF(0)); my_free((char *) rec_buff, MYF(0));
rec_buff = NULL; rec_buff = NULL;
...@@ -1522,7 +1512,6 @@ int ha_tokudb::get_status() { ...@@ -1522,7 +1512,6 @@ int ha_tokudb::get_status() {
DBT key, value; DBT key, value;
HA_METADATA_KEY curr_key; HA_METADATA_KEY curr_key;
int error; int error;
char error_msg[MAX_ALIAS_NAME + 50]; //50 is arbitrary upper bound of extra txt
char* newname = NULL; char* newname = NULL;
// //
// open status.tokudb // open status.tokudb
...@@ -1546,10 +1535,6 @@ int ha_tokudb::get_status() { ...@@ -1546,10 +1535,6 @@ int ha_tokudb::get_status() {
error = share->status_block->open(share->status_block, NULL, name_buff, NULL, DB_BTREE, open_mode, 0); error = share->status_block->open(share->status_block, NULL, name_buff, NULL, DB_BTREE, open_mode, 0);
if (error) { if (error) {
if (error == TOKUDB_DIRTY_DICTIONARY) {
sprintf(error_msg, "File %s is dirty, not opening DB", name_buff);
sql_print_error(error_msg);
}
goto cleanup; goto cleanup;
} }
} }
......
...@@ -1343,14 +1343,17 @@ int create_toku_descriptor( ...@@ -1343,14 +1343,17 @@ int create_toku_descriptor(
assert(!(is_clustering_key && !is_second_hpk && second_key == NULL)); assert(!(is_clustering_key && !is_second_hpk && second_key == NULL));
if (is_first_hpk) { if (is_first_hpk) {
pos[0] = 0; //field cannot be NULL, stating it pos[0] = 0; //say there is NO infinity byte
pos[1] = toku_type_hpk; pos[1] = 0; //field cannot be NULL, stating it
pos += 2; pos[2] = toku_type_hpk;
pos += 3;
} }
else { else {
// //
// first key is NOT a hidden primary key, so we now pack first_key // first key is NOT a hidden primary key, so we now pack first_key
// //
pos[0] = 1; //say there is an infinity byte
pos++;
num_bytes = create_toku_key_descriptor(first_key, pos); num_bytes = create_toku_key_descriptor(first_key, pos);
pos += num_bytes; pos += num_bytes;
} }
...@@ -1382,14 +1385,17 @@ int create_toku_descriptor( ...@@ -1382,14 +1385,17 @@ int create_toku_descriptor(
// write in the offset to this position in the first four bytes // write in the offset to this position in the first four bytes
// //
if (is_second_hpk) { if (is_second_hpk) {
pos[0] = 0; //field cannot be NULL, stating it pos[0] = 0; //say there is NO infinity byte
pos[1] = toku_type_hpk; pos[1] = 0; //field cannot be NULL, stating it
pos += 2; pos[2] = toku_type_hpk;
pos += 3;
} }
else { else {
// //
// second key is NOT a hidden primary key, so we now pack second_key // second key is NOT a hidden primary key, so we now pack second_key
// //
pos[0] = 1; //say there is an infinity byte
pos++;
num_bytes = create_toku_key_descriptor(second_key, pos); num_bytes = create_toku_key_descriptor(second_key, pos);
pos += num_bytes; pos += num_bytes;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment