Commit e60066eb authored by Rich Prohaska's avatar Rich Prohaska Committed by Yoni Fogel

refs #4476 build tokudb on mysql-5.6.9-rc

git-svn-id: file:///svn/mysql/tokudb-engine/tokudb-engine@51085 c7de825b-a66e-492c-adef-691d508d4ae1
parent 1fb4dfce
......@@ -274,6 +274,14 @@ static inline bool do_ignore_flag_optimization(THD* thd, TABLE* table, bool opt_
);
}
// Version-compatibility shim: returns the number of parts in a key.
// MySQL 5.6.9 removed the single KEY::key_parts member (splitting it into
// user_defined_key_parts / actual_key_parts / usable_key_parts), so the
// field to read must be selected at compile time by server version.
static inline uint get_key_parts(const KEY *key) {
#if 50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
    // MySQL 5.6.9 through 5.6.x: KEY::key_parts no longer exists.
    // NOTE(review): usable_key_parts can differ from actual_key_parts
    // when an index is partially unusable -- confirm usable_key_parts
    // (rather than actual_key_parts) is the intended count here.
    return key->usable_key_parts;
#else
    // Pre-5.6.9 servers: original KEY::key_parts member.
    return key->key_parts;
#endif
}
ulonglong ha_tokudb::table_flags() const {
return (table && do_ignore_flag_optimization(ha_thd(), table, share->replace_into_fast) ?
int_table_flags | HA_BINLOG_STMT_CAPABLE :
......@@ -628,7 +636,7 @@ void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offs
FILTER_KEY_PART_INFO parts[MAX_REF_PARTS];
uint curr_skip_index = 0;
for (uint i = 0; i < key->key_parts; i++) {
for (uint i = 0; i < get_key_parts(key); i++) {
//
// horrendous hack due to bugs in mysql, basically
// we cannot always reliably get the offset from the same source
......@@ -638,7 +646,7 @@ void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offs
}
qsort(
parts, // start of array
key->key_parts, //num elements
get_key_parts(key), //num elements
sizeof(*parts), //size of each element
filter_key_part_compare
);
......@@ -646,7 +654,7 @@ void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offs
for (uint i = 0; i < table->s->fields; i++) {
Field* field = table->field[i];
uint curr_field_offset = field_offset(field, table);
if (curr_skip_index < key->key_parts) {
if (curr_skip_index < get_key_parts(key)) {
uint curr_skip_offset = 0;
curr_skip_offset = parts[curr_skip_index].offset;
if (curr_skip_offset == curr_field_offset) {
......@@ -1582,7 +1590,7 @@ bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_I
for (uint curr_index = 0; curr_index < table_share->keys; curr_index++) {
if (curr_index == pk) continue;
KEY* curr_key_info = &table_share->key_info[curr_index];
for (uint i = 0; i < curr_key_info->key_parts; i++) {
for (uint i = 0; i < get_key_parts(curr_key_info); i++) {
uint16 curr_field_index = curr_key_info->key_part[i].field->field_index;
if (!bitmap_is_set(&kc_info->key_filters[curr_index],curr_field_index)) {
ret_val = false;
......@@ -1708,7 +1716,7 @@ int ha_tokudb::initialize_share(
//
ref_length = sizeof(uint32_t) + sizeof(uchar);
KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
KEY_PART_INFO *end = key_part + table->key_info[primary_key].key_parts;
KEY_PART_INFO *end = key_part + get_key_parts(&table->key_info[primary_key]);
for (; key_part != end; key_part++) {
ref_length += key_part->field->max_packed_col_length(key_part->length);
}
......@@ -2554,7 +2562,7 @@ uint32_t ha_tokudb::place_key_into_mysql_buff(
uchar* data
)
{
KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + key_info->key_parts;
KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + get_key_parts(key_info);
uchar *pos = data;
for (; key_part != end; key_part++) {
......@@ -2623,7 +2631,7 @@ uint32_t ha_tokudb::place_key_into_dbt_buff(
)
{
KEY_PART_INFO *key_part = key_info->key_part;
KEY_PART_INFO *end = key_part + key_info->key_parts;
KEY_PART_INFO *end = key_part + get_key_parts(key_info);
uchar* curr_buff = buff;
*has_null = false;
for (; key_part != end && key_length > 0; key_part++) {
......@@ -2808,7 +2816,7 @@ DBT *ha_tokudb::pack_key(
TOKUDB_DBUG_ENTER("ha_tokudb::pack_key");
KEY *key_info = &table->key_info[keynr];
KEY_PART_INFO *key_part = key_info->key_part;
KEY_PART_INFO *end = key_part + key_info->key_parts;
KEY_PART_INFO *end = key_part + get_key_parts(key_info);
my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
memset((void *) key, 0, sizeof(*key));
......@@ -5731,7 +5739,7 @@ int ha_tokudb::info(uint flag) {
for (uint i = 0; i < table_share->keys; i++) {
bool is_unique_key = (i == primary_key) || (table->key_info[i].flags & HA_NOSAME);
ulong val = (is_unique_key) ? 1 : 0;
table->key_info[i].rec_per_key[table->key_info[i].key_parts - 1] = val;
table->key_info[i].rec_per_key[get_key_parts(&table->key_info[i]) - 1] = val;
}
}
/* Don't return key if we got an error for the internal primary key */
......@@ -6359,9 +6367,9 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
}
for (i = 0; i < form->s->keys; i++) {
KEY *key = &form->s->key_info[i];
TOKUDB_TRACE("key:%d:%s:%d\n", i, key->name, key->key_parts);
TOKUDB_TRACE("key:%d:%s:%d\n", i, key->name, get_key_parts(key));
uint p;
for (p = 0; p < key->key_parts; p++) {
for (p = 0; p < get_key_parts(key); p++) {
KEY_PART_INFO *key_part = &key->key_part[p];
Field *field = key_part->field;
TOKUDB_TRACE("key:%d:%d:length=%d:%s:type=%d:flags=%x\n",
......
......@@ -398,7 +398,7 @@ int ha_tokudb::alter_table_add_index(TABLE *altered_table, Alter_inplace_info *h
for (uint i = 0; i < ha_alter_info->index_add_count; i++) {
KEY *key = &key_info[i];
*key = ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
for (KEY_PART_INFO *key_part= key->key_part; key_part < key->key_part + key->key_parts; key_part++)
for (KEY_PART_INFO *key_part= key->key_part; key_part < key->key_part + get_key_parts(key); key_part++)
key_part->field = table->field[key_part->fieldnr];
}
......@@ -665,7 +665,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_in
// Return true if a field is part of a key
static bool field_in_key(KEY *key, Field *field) {
for (uint i = 0; i < key->key_parts; i++) {
for (uint i = 0; i < get_key_parts(key); i++) {
KEY_PART_INFO *key_part = &key->key_part[i];
if (strcmp(key_part->field->field_name, field->field_name) == 0)
return true;
......
......@@ -41,8 +41,8 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
if (print_error) {
sql_print_error(
"keys disagree on if they are clustering, %d, %d",
curr_orig_key->key_parts,
curr_altered_key->key_parts
get_key_parts(curr_orig_key),
get_key_parts(curr_altered_key)
);
}
retval = false;
......@@ -52,19 +52,19 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
if (print_error) {
sql_print_error(
"keys disagree on if they are unique, %d, %d",
curr_orig_key->key_parts,
curr_altered_key->key_parts
get_key_parts(curr_orig_key),
get_key_parts(curr_altered_key)
);
}
retval = false;
goto cleanup;
}
if (curr_orig_key->key_parts != curr_altered_key->key_parts) {
if (get_key_parts(curr_orig_key) != get_key_parts(curr_altered_key)) {
if (print_error) {
sql_print_error(
"keys have different number of parts, %d, %d",
curr_orig_key->key_parts,
curr_altered_key->key_parts
get_key_parts(curr_orig_key),
get_key_parts(curr_altered_key)
);
}
retval = false;
......@@ -73,7 +73,7 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
//
// now verify that each field in the key is the same
//
for (uint32_t j = 0; j < curr_orig_key->key_parts; j++) {
for (uint32_t j = 0; j < get_key_parts(curr_orig_key); j++) {
KEY_PART_INFO* curr_orig_part = &curr_orig_key->key_part[j];
KEY_PART_INFO* curr_altered_part = &curr_altered_key->key_part[j];
Field* curr_orig_field = curr_orig_part->field;
......
......@@ -917,7 +917,7 @@ int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
uchar* pos = buf;
uint32_t num_bytes_in_field = 0;
uint32_t charset_num = 0;
for (uint i = 0; i < key->key_parts; i++){
for (uint i = 0; i < get_key_parts(key); i++){
Field* field = key->key_part[i].field;
//
// The first byte states if there is a null byte
......@@ -1677,7 +1677,7 @@ uint32_t pack_desc_pk_offset_info(
bool is_constant_offset = true;
uint32_t offset = 0;
for (uint i = 0; i < prim_key->key_parts; i++) {
for (uint i = 0; i < get_key_parts(prim_key); i++) {
KEY_PART_INFO curr = prim_key->key_part[i];
uint16 curr_field_index = curr.field->field_index;
......@@ -2302,8 +2302,8 @@ uint32_t create_toku_secondary_key_pack_descriptor (
//
// store number of parts
//
assert(prim_key->key_parts < 128);
pos[0] = 2*prim_key->key_parts;
assert(get_key_parts(prim_key) < 128);
pos[0] = 2 * get_key_parts(prim_key);
pos++;
//
// for each part, store if it is a fixed field or var field
......@@ -2313,7 +2313,7 @@ uint32_t create_toku_secondary_key_pack_descriptor (
//
pk_info = pos;
uchar* tmp = pos;
for (uint i = 0; i < prim_key->key_parts; i++) {
for (uint i = 0; i < get_key_parts(prim_key); i++) {
tmp += pack_desc_pk_info(
tmp,
kc_info,
......@@ -2324,11 +2324,11 @@ uint32_t create_toku_secondary_key_pack_descriptor (
//
// asserting that we moved forward as much as we think we have
//
assert(tmp - pos == (2*prim_key->key_parts));
assert(tmp - pos == (2 * get_key_parts(prim_key)));
pos = tmp;
}
for (uint i = 0; i < key_info->key_parts; i++) {
for (uint i = 0; i < get_key_parts(key_info); i++) {
KEY_PART_INFO curr_kpi = key_info->key_part[i];
uint16 field_index = curr_kpi.field->field_index;
Field* field = table_share->field[field_index];
......
......@@ -37,7 +37,15 @@
#define TOKU_PARTITION_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_WRITE_FRM_DATA 0
#elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
#elif 50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
#define TOKU_INCLUDE_ALTER_56 1
#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 0
#define TOKU_INCLUDE_XA 0
#define TOKU_PARTITION_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_WRITE_FRM_DATA 0
#define TOKU_INCLUDE_UPSERT 0
#elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50608
#define TOKU_INCLUDE_ALTER_56 1
#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1
#define TOKU_INCLUDE_XA 1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment