Commit 1915677d authored by Zardosht Kasheff, committed by Yoni Fogel

Addresses #993

Removed debug verification for heaviside functions

git-svn-id: file:///svn/mysql/tokudb-engine/src@5157 c7de825b-a66e-492c-adef-691d508d4ae1
parent 798179fd
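
Before this change, each heaviside lookup was cross-checked against the old DB_SET_RANGE scan; this commit keeps only the heaviside path. The exact contract of c_getf_heavi is not visible in this diff, so the following is only a minimal, generic sketch of the idea behind a heaviside (step-function) search: a monotone function over sorted keys lets the cursor binary-search for the sign change instead of walking records one by one. All names below (heavi_search, after_key_heavi_demo) are hypothetical stand-ins, not the engine's API.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Generic illustration (not the TokuDB API): a "heaviside" search looks for
 * the point where a monotone step function h() changes sign over sorted keys.
 * direction > 0 asks for the first entry with h > 0 (the HA_READ_AFTER_KEY
 * shape), direction < 0 for the last entry with h < 0 (HA_READ_BEFORE_KEY).
 */
typedef int (*heavi_fn)(const char *key, const void *extra);

/* h(key) = -1 while key <= target, +1 once key is past it. */
static int after_key_heavi_demo(const char *key, const void *extra) {
    const char *target = (const char *)extra;
    return strcmp(key, target) <= 0 ? -1 : +1;
}

/* Binary search for the sign change; returns an index or -1 if none. */
static int heavi_search(const char *const *keys, int n,
                        heavi_fn h, const void *extra, int direction) {
    int lo = 0, hi = n - 1, found = -1;
    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;
        if (h(keys[mid], extra) * direction > 0) {
            found = mid;                      /* candidate on the wanted side */
            if (direction > 0) hi = mid - 1;  /* keep looking for the first   */
            else               lo = mid + 1;  /* keep looking for the last    */
        } else {
            if (direction > 0) lo = mid + 1;
            else               hi = mid - 1;
        }
    }
    return found;
}

int main(void) {
    const char *keys[] = { "a1", "a2", "b1", "b2", "c1" };
    int idx = heavi_search(keys, 5, after_key_heavi_demo, "a9", +1);
    printf("first key after 'a9': %s\n", idx >= 0 ? keys[idx] : "(none)");
    return 0;
}

With direction +1 this finds the first key past the target, which is the shape of the HA_READ_AFTER_KEY case in the diff below; the removed "Old Slow" code computed the same answer by scanning with DB_NEXT_NODUP.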
@@ -2568,6 +2568,9 @@ static void dbt_copy_heavi(DBT const *key, DBT const *val, void *extra_f, int r_
     DBT_COPY_INFO info = (DBT_COPY_INFO)extra_f;
     int r;
+    assert(info->key->flags==0);
+    assert(info->val->flags==0);
     info->key->size = key->size;
     info->key->data = malloc(key->size);
     if (!info->key->data) { r = errno; goto cleanup; }
@@ -2703,7 +2706,6 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
     // TOKUDB_DBUG_DUMP("key=", key, key_len);
     DBT row;
     int error;
-    int h_error;
     CHECK_VALID_CURSOR();
@@ -2717,8 +2719,8 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
     struct dbt_copy_info copy_info; //Needed as part of the smart dbt.
     struct heavi_info heavi_info; //Needed for the heaviside function.
-    copy_info.key = &h_key;
-    copy_info.val = &h_val;
+    copy_info.key = &last_key;
+    copy_info.val = &row;
     heavi_info.db = share->key_file[active_index];
     heavi_info.key = &last_key;
     switch (find_flag) {
@@ -2733,54 +2735,15 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
         break;
     case HA_READ_AFTER_KEY: /* Find next rec. after key-record */
         //Heaviside
-        h_error = cursor->c_getf_heavi(cursor, 0, dbt_copy_heavi, &copy_info,
+        error = cursor->c_getf_heavi(cursor, 0, dbt_copy_heavi, &copy_info,
                                        after_key_heavi, &heavi_info, 1);
-        if (h_error==0 && copy_info.error!=0) h_error = copy_info.error;
+        if (error==0 && copy_info.error!=0) error = copy_info.error;
-        //Old Slow
-        error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
-        if (error == 0) {
-            DBT orig_key;
-            pack_key(&orig_key, active_index, key_buff2, key, key_len);
-            for (;;) {
-                if (tokudb_prefix_cmp_packed_key(share->key_file[active_index], &orig_key, &last_key) != 0)
-                    break;
-                error = cursor->c_get(cursor, &last_key, &row, DB_NEXT_NODUP);
-                if (error != 0)
-                    break;
-            }
-        }
-        //verify
-        assert(error==h_error);
-        if (error==0) {
-            assert(h_key.size == last_key.size);
-            assert(h_val.size == row.size);
-            assert(!memcmp(h_key.data, last_key.data, h_key.size));
-            assert(!memcmp(h_val.data, row.data, h_val.size));
-            free(h_key.data);
-            free(h_val.data);
-        }
         break;
     case HA_READ_BEFORE_KEY: /* Find next rec. before key-record */
         //heaviside
-        h_error = cursor->c_getf_heavi(cursor, 0, dbt_copy_heavi, &copy_info,
+        error = cursor->c_getf_heavi(cursor, 0, dbt_copy_heavi, &copy_info,
                                        before_key_heavi, &heavi_info, -1);
-        if (h_error==0 && copy_info.error!=0) h_error = copy_info.error;
+        if (error==0 && copy_info.error!=0) error = copy_info.error;
-        //Slow
-        error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
-        if (error == 0)
-            error = cursor->c_get(cursor, &last_key, &row, DB_PREV);
-        else if (error == DB_NOTFOUND)
-            error = cursor->c_get(cursor, &last_key, &row, DB_LAST);
-        //Verify
-        assert(error==h_error);
-        if (error==0) {
-            assert(h_key.size == last_key.size);
-            assert(h_val.size == row.size);
-            assert(!memcmp(h_key.data, last_key.data, h_key.size));
-            assert(!memcmp(h_val.data, row.data, h_key.size));
-            free(h_key.data);
-            free(h_val.data);
-        }
         break;
     case HA_READ_KEY_OR_NEXT: /* Record or next record */
         error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
@@ -2798,38 +2761,9 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
         break;
     case HA_READ_PREFIX_LAST_OR_PREV: /* Last or prev key with the same prefix */
         //heaviside
-        h_error = cursor->c_getf_heavi(cursor, 0, dbt_copy_heavi, &copy_info,
+        error = cursor->c_getf_heavi(cursor, 0, dbt_copy_heavi, &copy_info,
                                        prefix_last_or_prev_heavi, &heavi_info, -1);
-        if (h_error==0 && copy_info.error!=0) h_error = copy_info.error;
+        if (error==0 && copy_info.error!=0) error = copy_info.error;
-        //Slow
-        error = cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE);
-        if (error == 0) {
-            DBT orig_key;
-            pack_key(&orig_key, active_index, key_buff2, key, key_len);
-            for (;;) {
-                if (tokudb_prefix_cmp_packed_key(share->key_file[active_index], &orig_key, &last_key) != 0)
-                    break;
-                error = cursor->c_get(cursor, &last_key, &row, DB_NEXT_NODUP);
-                if (error != 0)
-                    break;
-            }
-            if (error == 0)
-                error = cursor->c_get(cursor, &last_key, &row, DB_PREV);
-            else if (error == DB_NOTFOUND)
-                error = cursor->c_get(cursor, &last_key, &row, DB_LAST);
-        }
-        else if (error == DB_NOTFOUND)
-            error = cursor->c_get(cursor, &last_key, &row, DB_LAST);
-        //Verify
-        assert(error==h_error);
-        if (error==0) {
-            assert(h_key.size == last_key.size);
-            assert(h_val.size == row.size);
-            assert(!memcmp(h_key.data, last_key.data, h_key.size));
-            assert(!memcmp(h_val.data, row.data, h_key.size));
-            free(h_key.data);
-            free(h_val.data);
-        }
         break;
     default:
         TOKUDB_TRACE("unsupported:%d\n", find_flag);
@@ -2840,6 +2774,8 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
     if (error && (tokudb_debug & TOKUDB_DEBUG_ERROR))
         TOKUDB_TRACE("error:%d:%d\n", error, find_flag);
 cleanup:
+    if (last_key.data && last_key.flags==DB_DBT_REALLOC) free(last_key.data);
+    if (row.data && row.flags==DB_DBT_REALLOC) free(row.data);
     //
     // Using dbt_copy_heavi (used with c_getf_heavi) will set
     // flags==DB_DBT_REALLOC.
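
The closing comment explains why the new cleanup frees last_key and row only when flags==DB_DBT_REALLOC: dbt_copy_heavi copies the engine-owned key/value into freshly malloc'd buffers and marks the DBTs as heap-owned. Below is a minimal sketch of that ownership convention, using a simplified stand-in struct (dbt_t, copy_into, and DEMO_DBT_REALLOC are illustrative, not the real definitions from the engine's db.h).

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for a DBT; the real struct lives in the engine's db.h. */
#define DEMO_DBT_REALLOC 0x010   /* stands in for DB_DBT_REALLOC */
typedef struct { void *data; unsigned size; unsigned flags; } dbt_t;

/* Mirrors what dbt_copy_heavi does for one DBT: the callback only sees
 * engine-owned memory, so it copies into a caller-owned buffer and marks
 * the DBT so the cleanup path knows it must be freed. */
static int copy_into(dbt_t *dst, const void *src, unsigned size) {
    assert(dst->flags == 0);        /* caller passed a pristine DBT */
    dst->data = malloc(size);
    if (dst->data == NULL) return errno ? errno : ENOMEM;
    memcpy(dst->data, src, size);
    dst->size = size;
    dst->flags = DEMO_DBT_REALLOC;  /* heap-owned: cleanup must free it */
    return 0;
}

int main(void) {
    dbt_t last_key = {0}, row = {0};
    const char k[] = "key", v[] = "value";

    int r = copy_into(&last_key, k, sizeof k);
    if (r == 0) r = copy_into(&row, v, sizeof v);

    /* Same shape as the new cleanup in index_read(): free only what the
     * copy callback allocated. */
    if (last_key.data && last_key.flags == DEMO_DBT_REALLOC) free(last_key.data);
    if (row.data && row.flags == DEMO_DBT_REALLOC) free(row.data);
    return r;
}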