Commit 935f78ac authored by Rich Prohaska

#92 add key_is_clustering accessor

parent f0629146
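The commit adds a key_is_clustering() helper to the TokuDB handler header and converts every direct test of the HA_CLUSTERING bit in KEY::flags to go through it, so the clustering check lives in one place. A minimal sketch of the pattern being applied, assuming a KEY pointer named key (the name is illustrative, not taken from the diff):

    // before: each call site tests the flag bit directly
    if (key->flags & HA_CLUSTERING) { /* clustering index */ }

    // after: each call site asks the accessor introduced by this commit
    if (key_is_clustering(key)) { /* clustering index */ }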
@@ -408,7 +408,7 @@ ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
 #if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
     flags |= HA_DO_INDEX_COND_PUSHDOWN;
 #endif
-    if (table_share->key_info[idx].flags & HA_CLUSTERING) {
+    if (key_is_clustering(&table_share->key_info[idx])) {
         flags |= HA_CLUSTERED_INDEX;
     }
     DBUG_RETURN(flags);
@@ -1658,7 +1658,7 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K
                 }
             }
         }
-        if (i == primary_key || table_share->key_info[i].flags & HA_CLUSTERING) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
            error = initialize_col_pack_info(kc_info,table_share,i);
            if (error) {
                goto exit;
@@ -3817,7 +3817,7 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
         //
         // test key packing of clustering keys
         //
-        if (table->key_info[keynr].flags & HA_CLUSTERING) {
+        if (key_is_clustering(&table->key_info[keynr])) {
             error = pack_row(&row, (const uchar *) record, keynr);
             assert(error == 0);
             uchar* tmp_buff = NULL;
@@ -4444,7 +4444,7 @@ void ha_tokudb::set_query_columns(uint keynr) {
         key_index = primary_key;
     }
     else {
-        key_index = (table->key_info[keynr].flags & HA_CLUSTERING ? keynr : primary_key);
+        key_index = (key_is_clustering(&table->key_info[keynr]) ? keynr : primary_key);
     }
     for (uint i = 0; i < table_share->fields; i++) {
         if (bitmap_is_set(table->read_set,i) ||
@@ -4779,7 +4779,7 @@ int ha_tokudb::read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT con
     //
     // case where we read from secondary table that is not clustered
     //
-    if (keynr != primary_key && !(table->key_info[keynr].flags & HA_CLUSTERING)) {
+    if (keynr != primary_key && !key_is_clustering(&table->key_info[keynr])) {
         bool has_null;
         //
         // create a DBT that has the same data as row, this is inefficient
@@ -4993,7 +4993,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
         break;
     }
     error = handle_cursor_error(error,HA_ERR_KEY_NOT_FOUND,tokudb_active_index);
-    if (!error && !key_read && tokudb_active_index != primary_key && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
+    if (!error && !key_read && tokudb_active_index != primary_key && !key_is_clustering(&table->key_info[tokudb_active_index])) {
         error = read_full_row(buf);
     }
@@ -5398,7 +5398,7 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare) {
     // key
     need_val = (this->key_read == 0) &&
                (tokudb_active_index == primary_key ||
-                table->key_info[tokudb_active_index].flags & HA_CLUSTERING
+                key_is_clustering(&table->key_info[tokudb_active_index])
                );
     if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) {
@@ -5478,7 +5478,7 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare) {
     // main table.
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING) ) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
         error = read_full_row(buf);
     }
     trx->stmt_progress.queried++;
@@ -5559,7 +5559,7 @@ int ha_tokudb::index_first(uchar * buf) {
     // still need to get entire contents of the row if operation done on
     // secondary DB and it was NOT a covering index
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING) ) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
         error = read_full_row(buf);
     }
     trx->stmt_progress.queried++;
@@ -5601,7 +5601,7 @@ int ha_tokudb::index_last(uchar * buf) {
     // still need to get entire contents of the row if operation done on
     // secondary DB and it was NOT a covering index
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING) ) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
         error = read_full_row(buf);
     }
@@ -6754,7 +6754,7 @@ static uint32_t create_secondary_key_descriptor(
         form->s,
         kc_info,
         keynr,
-        key_info->flags & HA_CLUSTERING
+        key_is_clustering(key_info)
         );
     return ptr - buf;
 }
@@ -7342,7 +7342,7 @@ double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
 {
     TOKUDB_DBUG_ENTER("ha_tokudb::keyread_time");
     double ret_val;
-    if ((table->key_info[index].flags & HA_CLUSTERING) || (index == primary_key)) {
+    if (index == primary_key || key_is_clustering(&table->key_info[index])) {
         ret_val = read_time(index, ranges, rows);
         DBUG_RETURN(ret_val);
     }
@@ -7392,7 +7392,7 @@ double ha_tokudb::read_time(
         goto cleanup;
     }
-    is_clustering = (table->key_info[index].flags & HA_CLUSTERING);
+    is_clustering = key_is_clustering(&table->key_info[index]);
     //
@@ -7757,7 +7757,7 @@ int ha_tokudb::tokudb_add_index(
     curr_index = curr_num_DBs;
     *modified_DBs = true;
     for (uint i = 0; i < num_of_keys; i++, curr_index++) {
-        if (key_info[i].flags & HA_CLUSTERING) {
+        if (key_is_clustering(&key_info[i])) {
             set_key_filter(
                 &share->kc_info.key_filters[curr_index],
                 &key_info[i],
...
@@ -787,5 +787,9 @@ class ha_tokudb : public handler {
 #endif
 };
+static inline bool key_is_clustering(const KEY *key) {
+    return key->flags & HA_CLUSTERING;
+}
 #endif
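For reference, a hedged usage sketch of the new accessor in the shape the call sites below take; table, keynr, and primary_key stand for the handler's members and locals and are assumed from the surrounding code rather than defined here, and covers_row is just an illustrative name:

    // the primary key and clustering secondary keys both carry the full row,
    // so reads on them never need a follow-up lookup into the main dictionary
    bool covers_row = (keynr == primary_key) ||
                      key_is_clustering(&table->key_info[keynr]);

Keeping the flag test behind one accessor also means a later change to how clustering keys are flagged only has to touch ha_tokudb.h, not every call site.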
@@ -632,7 +632,7 @@ int ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplac
         if (error)
             goto cleanup;
-        if (i == primary_key || table_share->key_info[i].flags & HA_CLUSTERING) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             num_column_extra = fill_row_mutator(
                 column_extra,
                 columns,
@@ -757,7 +757,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_in
             break;
         // for all trees that have values, make an update variable offsets message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             uint32_t offset_start = table_share->null_bytes + share->kc_info.mcp_info[i].fixed_field_size;
             uint32_t offset_end = offset_start + share->kc_info.mcp_info[i].len_of_offsets;
             uint32_t number_of_offsets = offset_end - offset_start;
@@ -939,7 +939,7 @@ int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace
             break;
         // for all trees that have values, make an expand update message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             uint32_t old_offset = alter_table_field_offset(table_share->null_bytes, ctx->table_kc_info, i, expand_field_num);
             uint32_t new_offset = alter_table_field_offset(table_share->null_bytes, ctx->altered_table_kc_info, i, expand_field_num);
             assert(old_offset <= new_offset);
@@ -1018,7 +1018,7 @@ int ha_tokudb::alter_table_expand_blobs(TABLE *altered_table, Alter_inplace_info
             break;
         // for all trees that have values, make an update blobs message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             tokudb::buffer b;
             uint8_t op = UPDATE_OP_EXPAND_BLOB;
             b.append(&op, sizeof op);
...
@@ -126,7 +126,7 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
             retval = false;
             goto cleanup;
         }
-        if (((curr_orig_key->flags & HA_CLUSTERING) == 0) != ((curr_altered_key->flags & HA_CLUSTERING) == 0)) {
+        if (key_is_clustering(curr_orig_key) != key_is_clustering(curr_altered_key)) {
             if (print_error) {
                 sql_print_error(
                     "keys disagree on if they are clustering, %d, %d",
...
@@ -538,7 +538,7 @@ static bool check_point_update(Item *conds, TABLE *table) {
 // Precompute this when the table is opened.
 static bool clustering_keys_exist(TABLE *table) {
     for (uint keynr = 0; keynr < table->s->keys; keynr++) {
-        if (keynr != table->s->primary_key && (table->s->key_info[keynr].flags & HA_CLUSTERING))
+        if (keynr != table->s->primary_key && key_is_clustering(&table->s->key_info[keynr]))
             return true;
     }
     return false;
...