Commit 30d9d4e2 authored by Sergei Golubchik

5.6.29-76.2

parent 9a957a5b
-SET(TOKUDB_VERSION 5.6.28-76.1)
+SET(TOKUDB_VERSION 5.6.29-76.2)
 # PerconaFT only supports x86-64 and cmake-2.8.9+
 IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
     NOT CMAKE_VERSION VERSION_LESS "2.8.9")
......
This diff is collapsed.
@@ -61,9 +61,9 @@ typedef struct loader_context {
 class TOKUDB_SHARE {
 public:
     enum share_state_t {
-        CLOSED,
-        OPENED,
-        ERROR
+        CLOSED = 0,
+        OPENED = 1,
+        ERROR = 2
     };
     // one time, start up init
@@ -88,6 +88,9 @@ class TOKUDB_SHARE {
     // exactly 0 _use_count
     static void drop_share(TOKUDB_SHARE* share);
+    // returns state string for logging/reporting
+    static const char* get_state_string(share_state_t state);
     void* operator new(size_t sz);
     void operator delete(void* p);
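The matching definition lives in the collapsed implementation file, so it is not visible here. A minimal sketch of what it plausibly looks like, given the now-explicit enum values; the body below is an assumption, not the commit's code:

// Hypothetical sketch: map each share_state_t to a printable name.
// The explicit CLOSED = 0 / OPENED = 1 / ERROR = 2 values introduced
// above make the mapping stable and self-documenting.
const char* TOKUDB_SHARE::get_state_string(share_state_t state) {
    switch (state) {
    case CLOSED: return "CLOSED";
    case OPENED: return "OPENED";
    case ERROR:  return "ERROR";
    }
    return "UNKNOWN";  // defensive default for an out-of-range value
}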
@@ -306,7 +309,6 @@ class TOKUDB_SHARE {
     // cardinality counts
     uint32_t _rec_per_keys;
     uint64_t* _rec_per_key;
-    bool _card_changed;
     void init(const char* table_name);
     void destroy();
@@ -315,17 +317,34 @@ inline int TOKUDB_SHARE::use_count() const {
     return _use_count;
 }
 inline void TOKUDB_SHARE::lock() const {
+    TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count);
     _mutex.lock();
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline void TOKUDB_SHARE::unlock() const {
+    TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count);
     _mutex.unlock();
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline TOKUDB_SHARE::share_state_t TOKUDB_SHARE::state() const {
     return _state;
 }
 inline void TOKUDB_SHARE::set_state(TOKUDB_SHARE::share_state_t state) {
+    TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:new_state[%s]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count,
+        get_state_string(state));
     assert_debug(_mutex.is_owned_by_me());
     _state = state;
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline const char* TOKUDB_SHARE::full_table_name() const {
     return _full_table_name.ptr();
@@ -346,6 +365,13 @@ inline uint TOKUDB_SHARE::table_name_length() const {
     return _table_name.length();
 }
 inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
+    TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:rows[%" PRIu64 "]:locked[%d]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count,
+        rows,
+        locked);
     if (!locked) {
         lock();
     } else {
@@ -358,6 +384,7 @@ inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
     if (!locked) {
         unlock();
     }
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline ha_rows TOKUDB_SHARE::row_count() const {
     return _rows;
@@ -371,7 +398,6 @@ inline void TOKUDB_SHARE::init_cardinality_counts(
     assert_always(_rec_per_key == NULL && _rec_per_keys == 0);
     _rec_per_keys = rec_per_keys;
     _rec_per_key = rec_per_key;
-    _card_changed = true;
 }
 inline void TOKUDB_SHARE::update_cardinality_counts(
     uint32_t rec_per_keys,
@@ -382,7 +408,6 @@ inline void TOKUDB_SHARE::update_cardinality_counts(
     assert_always(rec_per_keys == _rec_per_keys);
     assert_always(rec_per_key != NULL);
     memcpy(_rec_per_key, rec_per_key, _rec_per_keys * sizeof(uint64_t));
-    _card_changed = true;
 }
 inline void TOKUDB_SHARE::disallow_auto_analysis() {
     assert_debug(_mutex.is_owned_by_me());
......
@@ -374,6 +374,7 @@ void standard_t::on_run() {
         _local_txn = false;
     }
+    assert_always(_share->key_file[0] != NULL);
     _result = _share->key_file[0]->stat64(_share->key_file[0], _txn, &stat64);
     if (_result != 0) {
         _result = HA_ADMIN_FAILED;
@@ -575,6 +576,7 @@ int standard_t::analyze_key_progress(void) {
 int standard_t::analyze_key(uint64_t* rec_per_key_part) {
     int error = 0;
     DB* db = _share->key_file[_current_key];
+    assert_always(db != NULL);
     uint64_t num_key_parts = _share->_key_descriptors[_current_key]._parts;
     uint64_t unique_rows[num_key_parts];
     bool is_unique = _share->_key_descriptors[_current_key]._is_unique;
@@ -897,6 +899,7 @@ int ha_tokudb::do_optimize(THD* thd) {
         }
         DB* db = share->key_file[i];
+        assert_always(db != NULL);
         error = db->optimize(db);
         if (error) {
             goto cleanup;
@@ -1016,7 +1019,8 @@ int ha_tokudb::check(THD* thd, HA_CHECK_OPT* check_opt) {
             write_status_msg);
     }
     for (uint i = 0; i < num_DBs; i++) {
-        DB *db = share->key_file[i];
+        DB* db = share->key_file[i];
+        assert_always(db != NULL);
         const char* kname =
             i == primary_key ? "primary" : table_share->key_info[i].name;
         snprintf(
......
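A note on the assert_always guards added in this file: TRUNCATE TABLE resets the share's key_file[] handles, so a background job racing with it could dereference NULL. The guards turn that into an immediate, attributable abort. A minimal sketch of the pattern, with names taken from the diff above:

// Fail fast if the dictionary handle was cleared by a concurrent
// TRUNCATE TABLE, instead of crashing later inside the DB callback.
DB* db = share->key_file[i];
assert_always(db != NULL);
int error = db->optimize(db);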
@@ -680,7 +680,7 @@ int ha_tokudb::alter_table_add_index(
     KEY *key = &key_info[i];
     *key = ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
     for (KEY_PART_INFO* key_part = key->key_part;
-         key_part < key->key_part + get_key_parts(key);
+         key_part < key->key_part + key->user_defined_key_parts;
          key_part++) {
         key_part->field = table->field[key_part->fieldnr];
     }
@@ -1123,7 +1123,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(
 // Return true if a field is part of a key
 static bool field_in_key(KEY *key, Field *field) {
-    for (uint i = 0; i < get_key_parts(key); i++) {
+    for (uint i = 0; i < key->user_defined_key_parts; i++) {
         KEY_PART_INFO *key_part = &key->key_part[i];
         if (strcmp(key_part->field->field_name, field->field_name) == 0)
             return true;
......
@@ -75,8 +75,8 @@ static bool tables_have_same_keys(
         if (print_error) {
             sql_print_error(
                 "keys disagree on if they are clustering, %d, %d",
-                get_key_parts(curr_orig_key),
-                get_key_parts(curr_altered_key));
+                curr_orig_key->user_defined_key_parts,
+                curr_altered_key->user_defined_key_parts);
         }
         retval = false;
         goto cleanup;
@@ -86,18 +86,19 @@ static bool tables_have_same_keys(
         if (print_error) {
             sql_print_error(
                 "keys disagree on if they are unique, %d, %d",
-                get_key_parts(curr_orig_key),
-                get_key_parts(curr_altered_key));
+                curr_orig_key->user_defined_key_parts,
+                curr_altered_key->user_defined_key_parts);
         }
         retval = false;
         goto cleanup;
     }
-    if (get_key_parts(curr_orig_key) != get_key_parts(curr_altered_key)) {
+    if (curr_orig_key->user_defined_key_parts !=
+        curr_altered_key->user_defined_key_parts) {
         if (print_error) {
             sql_print_error(
                 "keys have different number of parts, %d, %d",
-                get_key_parts(curr_orig_key),
-                get_key_parts(curr_altered_key));
+                curr_orig_key->user_defined_key_parts,
+                curr_altered_key->user_defined_key_parts);
         }
         retval = false;
         goto cleanup;
@@ -105,7 +106,7 @@ static bool tables_have_same_keys(
     //
     // now verify that each field in the key is the same
    //
-    for (uint32_t j = 0; j < get_key_parts(curr_orig_key); j++) {
+    for (uint32_t j = 0; j < curr_orig_key->user_defined_key_parts; j++) {
         KEY_PART_INFO* curr_orig_part = &curr_orig_key->key_part[j];
         KEY_PART_INFO* curr_altered_part = &curr_altered_key->key_part[j];
         Field* curr_orig_field = curr_orig_part->field;
......
@@ -453,7 +453,7 @@ static bool check_all_update_expressions(
 static bool full_field_in_key(TABLE* table, Field* field) {
     assert_always(table->s->primary_key < table->s->keys);
     KEY* key = &table->s->key_info[table->s->primary_key];
-    for (uint i = 0; i < get_key_parts(key); i++) {
+    for (uint i = 0; i < key->user_defined_key_parts; i++) {
         KEY_PART_INFO* key_part = &key->key_part[i];
         if (strcmp(field->field_name, key_part->field->field_name) == 0) {
             return key_part->length == field->field_length;
@@ -519,7 +519,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
     if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
         return false;
     KEY *key = &table->s->key_info[table->s->primary_key];
-    for (uint i = 0; i < get_key_parts(key); i++)
+    for (uint i = 0; i < key->user_defined_key_parts; i++)
         bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);
     switch (conds->type()) {
......
@@ -1010,7 +1010,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
     uchar* pos = buf;
     uint32_t num_bytes_in_field = 0;
     uint32_t charset_num = 0;
-    for (uint i = 0; i < get_key_parts(key); i++){
+    for (uint i = 0; i < key->user_defined_key_parts; i++) {
         Field* field = key->key_part[i].field;
         //
         // The first byte states if there is a null byte
@@ -1881,7 +1881,7 @@ static uint32_t pack_desc_pk_offset_info(
     bool is_constant_offset = true;
     uint32_t offset = 0;
-    for (uint i = 0; i < get_key_parts(prim_key); i++) {
+    for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
         KEY_PART_INFO curr = prim_key->key_part[i];
         uint16 curr_field_index = curr.field->field_index;
@@ -2503,8 +2503,8 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
     //
     // store number of parts
     //
-    assert_always(get_key_parts(prim_key) < 128);
-    pos[0] = 2 * get_key_parts(prim_key);
+    assert_always(prim_key->user_defined_key_parts < 128);
+    pos[0] = 2 * prim_key->user_defined_key_parts;
     pos++;
     //
     // for each part, store if it is a fixed field or var field
@@ -2514,7 +2514,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
     //
     pk_info = pos;
     uchar* tmp = pos;
-    for (uint i = 0; i < get_key_parts(prim_key); i++) {
+    for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
         tmp += pack_desc_pk_info(
             tmp,
             kc_info,
@@ -2525,11 +2525,11 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
     //
     // asserting that we moved forward as much as we think we have
     //
-    assert_always(tmp - pos == (2 * get_key_parts(prim_key)));
+    assert_always(tmp - pos == (2 * prim_key->user_defined_key_parts));
     pos = tmp;
     }
-    for (uint i = 0; i < get_key_parts(key_info); i++) {
+    for (uint i = 0; i < key_info->user_defined_key_parts; i++) {
         KEY_PART_INFO curr_kpi = key_info->key_part[i];
         uint16 field_index = curr_kpi.field->field_index;
         Field* field = table_share->field[field_index];
......
@@ -36,10 +36,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
 #include "sql_class.h"
 #include "sql_show.h"
 #include "discover.h"
-#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
 #include <binlog.h>
-#endif
+#include "debug_sync.h"
 #undef PACKAGE
 #undef VERSION
......
@@ -674,6 +674,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
     // count the total number of prepared txn's that we discard
     long total_prepared = 0;
 #if TOKU_INCLUDE_XA
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "begin XA cleanup");
     while (1) {
         // get xid's
         const long n_xid = 1;
@@ -698,6 +699,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
         }
         total_prepared += n_prepared;
     }
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "end XA cleanup");
 #endif
     error = db_env->close(
         db_env,
@@ -922,19 +924,25 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
 #if TOKU_INCLUDE_XA
 static bool tokudb_sync_on_prepare(void) {
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     // skip sync of log if fsync log period > 0
-    if (tokudb::sysvars::fsync_log_period > 0)
+    if (tokudb::sysvars::fsync_log_period > 0) {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
         return false;
-    else
+    } else {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
         return true;
+    }
 }
 static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;
     // if tokudb_support_xa is disable, just return
     if (!tokudb::sysvars::support_xa(thd)) {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
         TOKUDB_DBUG_RETURN(r);
     }
@@ -944,7 +952,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
     if (txn) {
         uint32_t syncflag = tokudb_sync_on_prepare() ? 0 : DB_TXN_NOSYNC;
         TOKUDB_TRACE_FOR_FLAGS(
-            TOKUDB_DEBUG_TXN,
+            TOKUDB_DEBUG_XA,
             "doing txn prepare:%d:%p",
             all,
             txn);
@@ -957,15 +965,18 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
         // test hook to induce a crash on a debug build
         DBUG_EXECUTE_IF("tokudb_crash_prepare_after", DBUG_SUICIDE(););
     } else {
-        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "nothing to prepare %d", all);
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "nothing to prepare %d", all);
     }
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
     TOKUDB_DBUG_RETURN(r);
 }
 static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;
     if (len == 0 || xid_list == NULL) {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", 0);
         TOKUDB_DBUG_RETURN(0);
     }
     long num_returned = 0;
@@ -976,11 +987,13 @@ static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
         &num_returned,
         DB_NEXT);
     assert_always(r == 0);
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %ld", num_returned);
     TOKUDB_DBUG_RETURN((int)num_returned);
 }
 static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;
     DB_TXN* txn = NULL;
     TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
@@ -993,11 +1006,13 @@ static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
     r = 0;
 cleanup:
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
     TOKUDB_DBUG_RETURN(r);
 }
 static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;
     DB_TXN* txn = NULL;
     TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
@@ -1010,6 +1025,7 @@ static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
     r = 0;
 cleanup:
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
     TOKUDB_DBUG_RETURN(r);
 }
......
@@ -199,14 +199,4 @@ void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out);
 void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out);
 const char *tokudb_get_index_name(DB* db);
-inline uint get_key_parts(const KEY *key) {
-#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
-    (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) || \
-    (100009 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
-    return key->user_defined_key_parts;
-#else
-    return key->key_parts;
-#endif
-}
 #endif //#ifdef _HATOKU_HTON
@@ -14,5 +14,5 @@ select * from t;
 a b
 select TABLE_ROWS from information_schema.tables where table_schema='test' and table_name='t';
 TABLE_ROWS
-1
+0
 drop table t;
@@ -17,5 +17,5 @@ test.t analyze status OK
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 7 NULL NULL BTREE
-t 1 x 1 x A 7 NULL NULL YES BTREE
+t 1 x 1 x A 3 NULL NULL YES BTREE
 drop table t;
@@ -15,7 +15,7 @@ test.t analyze status OK
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 5 NULL NULL BTREE
-t 1 x 1 x A 5 NULL NULL YES BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
 t 1 y 1 y A 5 NULL NULL YES BTREE
 alter table t analyze partition p1;
 Table Op Msg_type Msg_text
@@ -23,13 +23,13 @@ test.t analyze status OK
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 5 NULL NULL BTREE
-t 1 x 1 x A 5 NULL NULL YES BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
 t 1 y 1 y A 5 NULL NULL YES BTREE
 insert into t values (100,1,1),(200,2,1),(300,3,1),(400,4,1),(500,5,1);
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 9 NULL NULL BTREE
-t 1 x 1 x A 9 NULL NULL YES BTREE
+t 1 x 1 x A 4 NULL NULL YES BTREE
 t 1 y 1 y A 9 NULL NULL YES BTREE
 alter table t analyze partition p0;
 Table Op Msg_type Msg_text
......
+drop table if exists t1;
+set @orig_table_open_cache = @@global.table_open_cache;
+create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
+lock tables t1 read;
+set @@global.table_open_cache = 1;
+begin;
+insert into t1 values(1),(1);
+select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
+ERROR 42S22: Unknown column 'c' in 'where clause'
+create table t1(c1 binary (1), c2 varbinary(1));
+ERROR 42S01: Table 't1' already exists
+unlock tables;
+drop table t1;
+set @@global.table_open_cache = @orig_table_open_cache;
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+set session default_storage_engine = 'tokudb';
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = 1;
+set session tokudb_analyze_mode = tokudb_analyze_standard;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+set global tokudb_cardinality_scale_percent = DEFAULT;
+set global tokudb_debug_pause_background_job_manager = TRUE;
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+database_name table_name job_type job_params scheduler
+test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; AUTO
+set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
+TRUNCATE TABLE t1;
+set global tokudb_debug_pause_background_job_manager = FALSE;
+set DEBUG_SYNC = 'now SIGNAL done';
+drop table t1;
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
+# test DB-917
+# test that table/share open lock timeout does not crash the server on subsequent access
+source include/have_tokudb.inc;
+disable_warnings;
+drop table if exists t1;
+enable_warnings;
+set @orig_table_open_cache = @@global.table_open_cache;
+create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
+lock tables t1 read;
+set @@global.table_open_cache = 1;
+begin;
+insert into t1 values(1),(1);
+# when the bug is present, this results in a lock wait timeout
+--error ER_BAD_FIELD_ERROR
+select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
+# when the bug exists, this results in the assertion
+# kc_info->cp_info[keynr] == NULL in tokudb/ha_tokudb.cc initialize_col_pack_info
+--error ER_TABLE_EXISTS_ERROR
+create table t1(c1 binary (1), c2 varbinary(1));
+unlock tables;
+drop table t1;
+set @@global.table_open_cache = @orig_table_open_cache;
+# This test for DB-938 tests a race condition where a scheduled background job
+# (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that
+# were set to NULL during a TRUNCATE TABLE operation.
+-- source include/have_tokudb.inc
+-- source include/have_debug.inc
+-- source include/have_debug_sync.inc
+-- enable_query_log
+set @orig_auto_analyze = @@session.tokudb_auto_analyze;
+set @orig_in_background = @@session.tokudb_analyze_in_background;
+set @orig_mode = @@session.tokudb_analyze_mode;
+set @orig_throttle = @@session.tokudb_analyze_throttle;
+set @orig_time = @@session.tokudb_analyze_time;
+set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @orig_default_storage_engine = @@session.default_storage_engine;
+set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
+# first, let's set up to auto analyze in the background with just about any activity
+set session default_storage_engine = 'tokudb';
+set session tokudb_auto_analyze = 1;
+set session tokudb_analyze_in_background = 1;
+set session tokudb_analyze_mode = tokudb_analyze_standard;
+set session tokudb_analyze_throttle = 0;
+set session tokudb_analyze_time = 0;
+set global tokudb_cardinality_scale_percent = DEFAULT;
+# in debug builds, we can prevent the background job manager from running;
+# let's do it to hold a job from running until we get the TRUNCATE TABLE
+# in action
+set global tokudb_debug_pause_background_job_manager = TRUE;
+create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
+insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
+# the insert above should have triggered an analyze, but since the bjm is paused,
+# we will see it sitting in the queue
+select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
+# let's flip to another connection
+connect(conn1, localhost, root);
+# set up the DEBUG_SYNC point
+set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
+# send the truncate table
+send TRUNCATE TABLE t1;
+# back to the default connection
+connection default;
+# release the bjm
+set global tokudb_debug_pause_background_job_manager = FALSE;
+# if the bug is present, the bjm should crash here within 1/4 of a second
+sleep 5;
+# let's release and clean up
+set DEBUG_SYNC = 'now SIGNAL done';
+connection conn1;
+reap;
+connection default;
+disconnect conn1;
+drop table t1;
+set session tokudb_auto_analyze = @orig_auto_analyze;
+set session tokudb_analyze_in_background = @orig_in_background;
+set session tokudb_analyze_mode = @orig_mode;
+set session tokudb_analyze_throttle = @orig_throttle;
+set session tokudb_analyze_time = @orig_time;
+set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set session default_storage_engine = @orig_default_storage_engine;
+set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
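The test arms a DEBUG_SYNC point named tokudb_after_truncate_all_dictionarys, which is why hatoku_hton.cc now includes debug_sync.h (see above). A hedged sketch of how such a point would sit in the engine's truncate path; the exact placement is assumed, not shown in this diff:

// Hypothetical placement, debug builds only: pause after the share's
// dictionaries are truncated, so the test can release the background
// job manager while key_file[] is in its just-reset state.
DEBUG_SYNC(ha_thd(), "tokudb_after_truncate_all_dictionarys");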
 -- source include/have_innodb.inc
 -- source include/have_tokudb.inc
 -- source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
 --disable_warnings
 drop table if exists t1, t2;
......
 -- source include/have_innodb.inc
 -- source include/have_tokudb.inc
 -- source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
 --disable_warnings
 drop table if exists t1, t2;
......
 --source include/have_tokudb.inc
 --source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+-- source include/not_valgrind.inc
+# Embedded server does not support crashing
+-- source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+-- source include/not_crashrep.inc
 --disable_warnings
 drop table if exists t1;
......
@@ -27,7 +27,7 @@ namespace tokudb {
     uint compute_total_key_parts(TABLE_SHARE *table_share) {
         uint total_key_parts = 0;
         for (uint i = 0; i < table_share->keys; i++) {
-            total_key_parts += get_key_parts(&table_share->key_info[i]);
+            total_key_parts += table_share->key_info[i].user_defined_key_parts;
         }
         return total_key_parts;
     }
@@ -156,13 +156,14 @@ namespace tokudb {
         uint orig_key_parts = 0;
         for (uint i = 0; i < table_share->keys; i++) {
             orig_key_offset[i] = orig_key_parts;
-            orig_key_parts += get_key_parts(&table_share->key_info[i]);
+            orig_key_parts += table_share->key_info[i].user_defined_key_parts;
         }
         // if orig card data exists, then use it to compute new card data
         if (error == 0) {
             uint next_key_parts = 0;
             for (uint i = 0; error == 0 && i < altered_table_share->keys; i++) {
-                uint ith_key_parts = get_key_parts(&altered_table_share->key_info[i]);
+                uint ith_key_parts =
+                    altered_table_share->key_info[i].user_defined_key_parts;
                 uint orig_key_index;
                 if (find_index_of_key(
                     altered_table_share->key_info[i].name,
......
@@ -50,6 +50,8 @@ static void tokudb_backtrace(void);
 #define TOKUDB_DEBUG_UPSERT (1<<12)
 #define TOKUDB_DEBUG_CHECK (1<<13)
 #define TOKUDB_DEBUG_ANALYZE (1<<14)
+#define TOKUDB_DEBUG_XA (1<<15)
+#define TOKUDB_DEBUG_SHARE (1<<16)
 #define TOKUDB_TRACE(_fmt, ...) { \
     fprintf(stderr, "%u %s:%u %s " _fmt "\n", tokudb::thread::my_tid(), \
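The two new bits extend the bitmask read from the tokudb_debug variable, which gates every TOKUDB_TRACE_FOR_FLAGS call site. A hedged usage sketch; the SET statements in the comment are illustrative:

// Traces fire only when the matching bit is set in tokudb_debug:
//   SET GLOBAL tokudb_debug = 32768;  -- 1<<15 enables XA tracing
//   SET GLOBAL tokudb_debug = 65536;  -- 1<<16 enables share tracing
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "doing txn prepare:%d:%p", all, txn);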
@@ -124,7 +126,6 @@ static void tokudb_backtrace(void);
     DBUG_RETURN(r); \
 }
 #define TOKUDB_HANDLER_DBUG_VOID_RETURN { \
     if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN)) { \
         TOKUDB_HANDLER_TRACE("return"); \
@@ -132,6 +133,61 @@ static void tokudb_backtrace(void);
     DBUG_VOID_RETURN; \
 }
+#define TOKUDB_SHARE_TRACE(_fmt, ...) \
+    fprintf(stderr, "%u %p %s:%u TOKUDB_SHARE::%s " _fmt "\n", \
+        tokudb::thread::my_tid(), this, __FILE__, __LINE__, \
+        __FUNCTION__, ##__VA_ARGS__);
+#define TOKUDB_SHARE_TRACE_FOR_FLAGS(_flags, _fmt, ...) { \
+    if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(_flags))) { \
+        TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
+    } \
+}
+#define TOKUDB_SHARE_DBUG_ENTER(_fmt, ...) { \
+    if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_ENTER) || \
+        (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+        TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
+    } \
+} \
+    DBUG_ENTER(__FUNCTION__);
+#define TOKUDB_SHARE_DBUG_RETURN(r) { \
+    int rr = (r); \
+    if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+        (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE) || \
+        (rr != 0 && (tokudb::sysvars::debug & TOKUDB_DEBUG_ERROR)))) { \
+        TOKUDB_SHARE_TRACE("return %d", rr); \
+    } \
+    DBUG_RETURN(rr); \
+}
+#define TOKUDB_SHARE_DBUG_RETURN_DOUBLE(r) { \
+    double rr = (r); \
+    if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+        (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+        TOKUDB_SHARE_TRACE("return %f", rr); \
+    } \
+    DBUG_RETURN(rr); \
+}
+#define TOKUDB_SHARE_DBUG_RETURN_PTR(r) { \
+    if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+        (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+        TOKUDB_SHARE_TRACE("return 0x%p", r); \
+    } \
+    DBUG_RETURN(r); \
+}
+#define TOKUDB_SHARE_DBUG_VOID_RETURN() { \
+    if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
+        (tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
+        TOKUDB_SHARE_TRACE("return"); \
+    } \
+    DBUG_VOID_RETURN; \
+}
 #define TOKUDB_DBUG_DUMP(s, p, len) \
 { \
     TOKUDB_TRACE("%s", s); \
......
@@ -1119,9 +1119,9 @@ void background_job_status_callback(
     table->field[3]->store(type, strlen(type), system_charset_info);
     table->field[4]->store(params, strlen(params), system_charset_info);
     if (user_scheduled)
-        table->field[5]->store("USER", sizeof("USER"), system_charset_info);
+        table->field[5]->store("USER", strlen("USER"), system_charset_info);
     else
-        table->field[5]->store("AUTO", sizeof("AUTO"), system_charset_info);
+        table->field[5]->store("AUTO", strlen("AUTO"), system_charset_info);
     field_store_time_t(table->field[6], scheduled_time);
     field_store_time_t(table->field[7], started_time);
......
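The sizeof-to-strlen change above fixes an off-by-one: sizeof applied to a string literal counts the trailing NUL, so the old code stored five bytes for "USER" instead of four. A one-liner that makes this concrete:

static_assert(sizeof("USER") == 5, "sizeof counts the trailing NUL");
// strlen("USER") == 4 at runtime: the length Field::store() should get.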