Commit f7eac23d authored by Rich Prohaska

#195 merge mariadb 10.0.9

parent 16bf5686
......@@ -404,7 +404,8 @@ static inline bool do_ignore_flag_optimization(THD* thd, TABLE* table, bool opt_
static inline uint get_key_parts(const KEY *key) {
#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
(50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
(50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) || \
(100009 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
return key->user_defined_key_parts;
#else
return key->key_parts;
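Note: MYSQL_VERSION_ID packs the server version as major*10000 + minor*100 + patch, so the new guard (100009 <= MYSQL_VERSION_ID <= 100099) covers MariaDB 10.0.9 through 10.0.99, the series where KEY::user_defined_key_parts replaces key_parts just as in MySQL 5.6/5.7. An illustrative check of the encoding (not part of the patch):

// Illustrative only: how MYSQL_VERSION_ID encodes a server version.
static_assert(10 * 10000 + 0 * 100 + 9 == 100009, "MariaDB 10.0.9");
static_assert(5 * 10000 + 6 * 100 + 10 == 50610, "MySQL 5.6.10");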
......@@ -2061,8 +2062,13 @@ int ha_tokudb::write_frm_data(DB* db, DB_TXN* txn, const char* frm_name) {
size_t frm_len = 0;
int error = 0;
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
error = table_share->read_frm_image((const uchar**)&frm_data,&frm_len);
if (error) { goto cleanup; }
#else
error = readfrm(frm_name,&frm_data,&frm_len);
if (error) { goto cleanup; }
#endif
error = write_to_status(db,hatoku_frm_data,frm_data,(uint)frm_len, txn);
if (error) { goto cleanup; }
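write_frm_data() above and verify_frm_data() below repeat the same version split: MariaDB 10.0 serves the frm image through TABLE_SHARE::read_frm_image(), while older servers read it from disk with readfrm(). A minimal sketch of how the two paths line up (hypothetical helper, not in the patch):

// Hypothetical wrapper over the two frm-image sources used in this patch;
// both callers only need the image bytes and their length.
static int tokudb_get_frm_image(TABLE_SHARE *table_share, const char *frm_name,
                                uchar **frm_data, size_t *frm_len) {
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
    return table_share->read_frm_image((const uchar **)frm_data, frm_len);
#else
    return readfrm(frm_name, frm_data, frm_len);
#endif
}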
......@@ -2098,8 +2104,13 @@ int ha_tokudb::verify_frm_data(const char* frm_name, DB_TXN* txn) {
memset(&key, 0, sizeof(key));
    memset(&stored_frm, 0, sizeof(stored_frm));
// get the frm data from MySQL
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
error = table_share->read_frm_image((const uchar**)&mysql_frm_data,&mysql_frm_len);
if (error) { goto cleanup; }
#else
error = readfrm(frm_name,&mysql_frm_data,&mysql_frm_len);
if (error) { goto cleanup; }
#endif
key.data = &curr_key;
key.size = sizeof(curr_key);
......@@ -6390,68 +6401,16 @@ THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_l
DBUG_RETURN(to);
}
static inline enum row_type compression_method_to_row_type(enum toku_compression_method method) {
switch (method) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
case TOKU_NO_COMPRESSION:
return ROW_TYPE_TOKU_UNCOMPRESSED;
case TOKU_ZLIB_METHOD:
case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD:
return ROW_TYPE_TOKU_ZLIB;
case TOKU_QUICKLZ_METHOD:
return ROW_TYPE_TOKU_QUICKLZ;
case TOKU_LZMA_METHOD:
return ROW_TYPE_TOKU_LZMA;
case TOKU_FAST_COMPRESSION_METHOD:
return ROW_TYPE_TOKU_FAST;
case TOKU_SMALL_COMPRESSION_METHOD:
return ROW_TYPE_TOKU_SMALL;
#else
case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD:
#endif
case TOKU_DEFAULT_COMPRESSION_METHOD:
return ROW_TYPE_DEFAULT;
default:
assert(false);
}
}
static enum row_type get_row_type_for_key(DB *file) {
static toku_compression_method get_compression_method(DB *file) {
enum toku_compression_method method;
int r = file->get_compression_method(file, &method);
assert(r == 0);
return compression_method_to_row_type(method);
return method;
}
#if MYSQL_VERSION_ID >= 50521
enum row_type ha_tokudb::get_row_type(void) const {
#else
enum row_type ha_tokudb::get_row_type(void) {
#endif
return get_row_type_for_key(share->file);
}
static inline enum toku_compression_method row_type_to_compression_method(enum row_type type) {
switch (type) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
case ROW_TYPE_TOKU_UNCOMPRESSED:
return TOKU_NO_COMPRESSION;
case ROW_TYPE_TOKU_ZLIB:
return TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
case ROW_TYPE_TOKU_QUICKLZ:
return TOKU_QUICKLZ_METHOD;
case ROW_TYPE_TOKU_LZMA:
return TOKU_LZMA_METHOD;
case ROW_TYPE_TOKU_SMALL:
return TOKU_LZMA_METHOD;
case ROW_TYPE_TOKU_FAST:
return TOKU_QUICKLZ_METHOD;
#endif
default:
DBUG_PRINT("info", ("Ignoring ROW_FORMAT not used by TokuDB, using TOKUDB_ZLIB by default instead"));
case ROW_TYPE_DEFAULT:
return TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
}
toku_compression_method compression_method = get_compression_method(share->file);
return toku_compression_method_to_row_type(compression_method);
}
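The rewritten get_row_type() no longer hard-codes a method-to-row_type switch; it reads the actual compression method from the fractal tree and maps it through the srv_row_format_t conversions added to hatoku_hton.h further down. The equivalent call chain, spelled out (illustrative only):

// Illustrative expansion of the call chain used by get_row_type():
toku_compression_method method = get_compression_method(share->file);
srv_row_format_t row_format = toku_compression_method_to_row_format(method);
enum row_type type = row_format_to_row_type(row_format);
// ...which is exactly what toku_compression_method_to_row_type(method) returns.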
static int create_sub_table(
......@@ -6460,7 +6419,7 @@ static int create_sub_table(
DB_TXN* txn,
uint32_t block_size,
uint32_t read_block_size,
enum toku_compression_method compression_method,
toku_compression_method compression_method,
bool is_hot_index
)
{
......@@ -6686,7 +6645,7 @@ int ha_tokudb::create_secondary_dictionary(
KEY_AND_COL_INFO* kc_info,
uint32_t keynr,
bool is_hot_index,
enum row_type row_type
toku_compression_method compression_method
)
{
int error;
......@@ -6738,7 +6697,7 @@ int ha_tokudb::create_secondary_dictionary(
block_size = get_tokudb_block_size(thd);
read_block_size = get_tokudb_read_block_size(thd);
error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, row_type_to_compression_method(row_type), is_hot_index);
error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, compression_method, is_hot_index);
cleanup:
tokudb_my_free(newname);
tokudb_my_free(row_desc_buff);
......@@ -6783,7 +6742,7 @@ static uint32_t create_main_key_descriptor(
// create and close the main dictionary named "name" using the table form, all within
// transaction txn.
//
int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, enum row_type row_type) {
int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method) {
int error;
DBT row_descriptor;
uchar* row_desc_buff = NULL;
......@@ -6829,36 +6788,13 @@ int ha_tokudb::create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn
read_block_size = get_tokudb_read_block_size(thd);
/* Create the main table that will hold the real rows */
error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, row_type_to_compression_method(row_type), false);
error = create_sub_table(newname, &row_descriptor, txn, block_size, read_block_size, compression_method, false);
cleanup:
tokudb_my_free(newname);
tokudb_my_free(row_desc_buff);
return error;
}
static inline enum row_type row_format_to_row_type(srv_row_format_t row_format) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
switch (row_format) {
case SRV_ROW_FORMAT_UNCOMPRESSED:
return ROW_TYPE_TOKU_UNCOMPRESSED;
case SRV_ROW_FORMAT_ZLIB:
return ROW_TYPE_TOKU_ZLIB;
case SRV_ROW_FORMAT_QUICKLZ:
return ROW_TYPE_TOKU_QUICKLZ;
case SRV_ROW_FORMAT_LZMA:
return ROW_TYPE_TOKU_LZMA;
case SRV_ROW_FORMAT_SMALL:
return ROW_TYPE_TOKU_SMALL;
case SRV_ROW_FORMAT_FAST:
return ROW_TYPE_TOKU_FAST;
case SRV_ROW_FORMAT_DEFAULT:
return ROW_TYPE_DEFAULT;
}
assert(0);
#endif
return ROW_TYPE_DEFAULT;
}
//
// Creates a new table
// Parameters:
......@@ -6882,15 +6818,19 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
KEY_AND_COL_INFO kc_info;
tokudb_trx_data *trx = NULL;
THD* thd = ha_thd();
bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
memset(&kc_info, 0, sizeof(kc_info));
trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot);
memset(&kc_info, 0, sizeof(kc_info));
const enum row_type row_type = ((create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)
? create_info->row_type
: row_format_to_row_type(get_row_format(thd)));
#if TOKU_INCLUDE_OPTION_STRUCTS
const srv_row_format_t row_format = (srv_row_format_t) form->s->option_struct->row_format;
#else
const srv_row_format_t row_format = (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)
? row_type_to_row_format(create_info->row_type)
: get_row_format(thd);
#endif
const toku_compression_method compression_method = row_format_to_toku_compression_method(row_format);
bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE);
if (create_from_engine) {
// table already exists, nothing to do
error = 0;
......@@ -6918,6 +6858,7 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
newname = (char *)tokudb_my_malloc(get_max_dict_name_path_length(name),MYF(MY_WME));
if (newname == NULL){ error = ENOMEM; goto cleanup;}
trx = (tokudb_trx_data *) thd_data_get(ha_thd(), tokudb_hton->slot);
if (trx && trx->sub_sp_level && thd_sql_command(thd) == SQLCOM_CREATE_TABLE) {
txn = trx->sub_sp_level;
}
......@@ -6975,7 +6916,7 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
);
if (error) { goto cleanup; }
error = create_main_dictionary(name, form, txn, &kc_info, row_type);
error = create_main_dictionary(name, form, txn, &kc_info, compression_method);
if (error) {
goto cleanup;
}
......@@ -6983,7 +6924,7 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
for (uint i = 0; i < form->s->keys; i++) {
if (i != primary_key) {
error = create_secondary_dictionary(name, form, &form->key_info[i], txn, &kc_info, i, false, row_type);
error = create_secondary_dictionary(name, form, &form->key_info[i], txn, &kc_info, i, false, compression_method);
if (error) {
goto cleanup;
}
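In create(), the compression for new dictionaries is now resolved once, before any dictionary is built: MariaDB builds with TOKU_INCLUDE_OPTION_STRUCTS take it from the table's COMPRESSION option struct, while other builds prefer an explicit ROW_FORMAT and fall back to the tokudb_row_format session variable. A sketch of that precedence as a standalone helper (hypothetical, not part of the patch):

// Hypothetical helper restating the logic in ha_tokudb::create():
// option-structs builds read the table option; other builds use an explicit
// ROW_FORMAT if given, else the tokudb_row_format session variable.
static srv_row_format_t pick_row_format(THD *thd, TABLE *form, HA_CREATE_INFO *create_info) {
#if TOKU_INCLUDE_OPTION_STRUCTS
    return (srv_row_format_t) form->s->option_struct->row_format;
#else
    if (create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)
        return row_type_to_row_format(create_info->row_type);
    return get_row_format(thd);
#endif
}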
......@@ -7596,7 +7537,7 @@ int ha_tokudb::tokudb_add_index(
//
// get the row type to use for the indexes we're adding
//
const enum row_type row_type = get_row_type_for_key(share->file);
toku_compression_method compression_method = get_compression_method(share->file);
//
// status message to be shown in "show process list"
......@@ -7668,7 +7609,7 @@ int ha_tokudb::tokudb_add_index(
}
error = create_secondary_dictionary(share->table_name, table_arg, &key_info[i], txn, &share->kc_info, curr_index, creating_hot_index, row_type);
error = create_secondary_dictionary(share->table_name, table_arg, &key_info[i], txn, &share->kc_info, curr_index, creating_hot_index, compression_method);
if (error) { goto cleanup; }
error = open_secondary_dictionary(
......@@ -8062,7 +8003,7 @@ int ha_tokudb::truncate_dictionary( uint keynr, DB_TXN* txn ) {
int error;
bool is_pk = (keynr == primary_key);
const enum row_type row_type = get_row_type_for_key(share->key_file[keynr]);
toku_compression_method compression_method = get_compression_method(share->key_file[keynr]);
error = share->key_file[keynr]->close(share->key_file[keynr], 0);
assert(error == 0);
......@@ -8093,7 +8034,7 @@ int ha_tokudb::truncate_dictionary( uint keynr, DB_TXN* txn ) {
}
if (is_pk) {
error = create_main_dictionary(share->table_name, table, txn, &share->kc_info, row_type);
error = create_main_dictionary(share->table_name, table, txn, &share->kc_info, compression_method);
}
else {
error = create_secondary_dictionary(
......@@ -8104,7 +8045,7 @@ int ha_tokudb::truncate_dictionary( uint keynr, DB_TXN* txn ) {
&share->kc_info,
keynr,
false,
row_type
compression_method
);
}
if (error) { goto cleanup; }
......
......@@ -216,10 +216,10 @@ private:
TOKUDB_SHARE *share; ///< Shared lock info
#ifdef MARIADB_BASE_VERSION
// maria version of MRR
// MariaDB version of MRR
DsMrr_impl ds_mrr;
#elif 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
// maria version of MRR
// MySQL version of MRR
DsMrr_impl ds_mrr;
#endif
......@@ -469,9 +469,9 @@ private:
KEY_AND_COL_INFO* kc_info,
uint32_t keynr,
bool is_hot_index,
enum row_type row_type
toku_compression_method compression_method
);
int create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, enum row_type row_type);
int create_main_dictionary(const char* name, TABLE* form, DB_TXN* txn, KEY_AND_COL_INFO* kc_info, toku_compression_method compression_method);
void trace_create_table_info(const char *name, TABLE * form);
int is_index_unique(bool* is_unique, DB_TXN* txn, DB* db, KEY* key_info);
int is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint dict_index, DB_TXN* txn);
......@@ -626,8 +626,13 @@ public:
int cmp_ref(const uchar * ref1, const uchar * ref2);
bool check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes);
// MariaDB MRR introduced in 5.5
#ifdef MARIADB_BASE_VERSION
// MariaDB MRR introduced in 5.5, API changed in MariaDB 10.0
#if MYSQL_VERSION_ID >= 100000
#define COST_VECT Cost_estimate
#endif
int multi_range_read_init(RANGE_SEQ_IF* seq,
void* seq_init_param,
uint n_ranges, uint mode,
......@@ -640,12 +645,11 @@ public:
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint key_parts, uint *bufsz,
uint *flags, COST_VECT *cost);
int multi_range_read_explain_info(uint mrr_mode,
char *str, size_t size);
#endif
int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size);
#else
// MariaDB MRR introduced in 5.6
#if !defined(MARIADB_BASE_VERSION)
// MySQL MRR introduced in 5.6
#if 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
uint n_ranges, uint mode, HANDLER_BUFFER *buf);
......@@ -657,6 +661,7 @@ public:
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint *bufsz, uint *flags, Cost_estimate *cost);
#endif
#endif
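MariaDB 10.0 replaced the MRR cost type COST_VECT with Cost_estimate; the #define above lets the 5.5-era declarations compile unchanged against both series, e.g.:

// With the #define in place, this MariaDB 5.5-style prototype expands to
// Cost_estimate* on MariaDB 10.0 and stays COST_VECT* on 5.5.
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
                              uint key_parts, uint *bufsz,
                              uint *flags, COST_VECT *cost);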
// ICP introduced in MariaDB 5.5
......@@ -755,11 +760,7 @@ public:
DBT* key_to_compare
);
#if MYSQL_VERSION_ID >= 50521
enum row_type get_row_type() const;
#else
enum row_type get_row_type();
#endif
private:
int read_full_row(uchar * buf);
......@@ -802,7 +803,11 @@ private:
int map_to_handler_error(int error);
};
#if defined(MARIADB_BASE_VERSION)
#if TOKU_INCLUDE_OPTION_STRUCTS
struct ha_table_option_struct {
uint row_format;
};
struct ha_index_option_struct {
bool clustering;
};
......
......@@ -482,7 +482,11 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
assert(error == 0);
// Set the new compression
enum toku_compression_method method = row_type_to_compression_method(create_info->row_type);
#if TOKU_INCLUDE_OPTION_STRUCTS
toku_compression_method method = row_format_to_toku_compression_method((srv_row_format_t) create_info->option_struct->row_format);
#else
toku_compression_method method = row_type_to_toku_compression_method(create_info->row_type);
#endif
uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint32_t i = 0; i < curr_num_DBs; i++) {
db = share->key_file[i];
......
......@@ -88,6 +88,7 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
/****************************************************************************
* DS-MRR implementation, essentially copied from InnoDB/MyISAM/Maria
***************************************************************************/
......@@ -95,7 +96,6 @@ PATENT RIGHTS GRANT:
/**
* Multi Range Read interface, DS-MRR calls
*/
int ha_tokudb::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
uint n_ranges, uint mode,
HANDLER_BUFFER *buf)
......
......@@ -106,6 +106,7 @@ PATENT RIGHTS GRANT:
#if defined(MARIADB_BASE_VERSION)
#define TOKU_INCLUDE_EXTENDED_KEYS 1
#endif
#define TOKU_INCLUDE_OPTION_STRUCTS 1
#elif 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
// mysql 5.7 with no patches
......@@ -171,6 +172,10 @@ PATENT RIGHTS GRANT:
#define HA_CAN_WRITE_DURING_OPTIMIZE 0
#endif
#if !defined(HA_OPTION_CREATE_FROM_ENGINE)
#define HA_OPTION_CREATE_FROM_ENGINE 0
#endif
// In older (< 5.5) versions of MySQL and MariaDB, it is necessary to
// use a read/write lock on the key_file array in a table share,
// because table locks do not protect the race of some thread closing
......
......@@ -126,7 +126,14 @@ typedef struct savepoint_info {
bool in_sub_stmt;
} *SP_INFO, SP_INFO_T;
#if defined(MARIADB_BASE_VERSION)
#if TOKU_INCLUDE_OPTION_STRUCTS
ha_create_table_option tokudb_table_options[] = {
HA_TOPTION_ENUM("compression", row_format,
"TOKUDB_UNCOMPRESSED,TOKUDB_ZLIB,TOKUDB_QUICKLZ,"
"TOKUDB_LZMA,TOKUDB_FAST,TOKUDB_SMALL", 0),
HA_TOPTION_END
};
ha_create_table_option tokudb_index_options[] = {
HA_IOPTION_BOOL("clustering", clustering, 0),
HA_IOPTION_END
......@@ -162,8 +169,13 @@ static int tokudb_rollback_by_xid(handlerton* hton, XID* xid);
static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *savepoint);
static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint);
static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoint);
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
static int tokudb_discover_table(handlerton *hton, THD* thd, TABLE_SHARE *ts);
static int tokudb_discover_table_existence(handlerton *hton, const char *db, const char *name);
#endif
static int tokudb_discover(handlerton *hton, THD* thd, const char *db, const char *name, uchar **frmblob, size_t *frmlen);
static int tokudb_discover2(handlerton *hton, THD* thd, const char *db, const char *name, bool translate_name,uchar **frmblob, size_t *frmlen);
static int tokudb_discover2(handlerton *hton, THD* thd, const char *db, const char *name, bool translate_name, uchar **frmblob, size_t *frmlen);
static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const char *name, char *path, uchar **frmblob, size_t *frmlen);
handlerton *tokudb_hton;
const char *ha_tokudb_ext = ".tokudb";
......@@ -370,9 +382,14 @@ static int tokudb_init_func(void *p) {
tokudb_hton->savepoint_rollback = tokudb_rollback_to_savepoint;
tokudb_hton->savepoint_release = tokudb_release_savepoint;
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
tokudb_hton->discover_table = tokudb_discover_table;
tokudb_hton->discover_table_existence = tokudb_discover_table_existence;
#else
tokudb_hton->discover = tokudb_discover;
#if defined(MYSQL_HANDLERTON_INCLUDE_DISCOVER2)
tokudb_hton->discover2 = tokudb_discover2;
#endif
#endif
tokudb_hton->commit = tokudb_commit;
tokudb_hton->rollback = tokudb_rollback;
......@@ -390,7 +407,8 @@ static int tokudb_init_func(void *p) {
tokudb_hton->handle_fatal_signal = tokudb_handle_fatal_signal;
#endif
#if defined(MARIADB_BASE_VERSION)
#if TOKU_INCLUDE_OPTION_STRUCTS
tokudb_hton->table_options = tokudb_table_options;
tokudb_hton->index_options = tokudb_index_options;
#endif
......@@ -921,26 +939,68 @@ static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoin
TOKUDB_DBUG_RETURN(error);
}
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
static int tokudb_discover_table(handlerton *hton, THD* thd, TABLE_SHARE *ts) {
uchar *frmblob = 0;
size_t frmlen;
int res= tokudb_discover3(hton, thd, ts->db.str, ts->table_name.str,
ts->normalized_path.str, &frmblob, &frmlen);
if (!res)
res= ts->init_from_binary_frm_image(thd, true, frmblob, frmlen);
my_free(frmblob);
    // discover_table should return HA_ERR_NO_SUCH_TABLE for "not exists"
return res == ENOENT ? HA_ERR_NO_SUCH_TABLE : res;
}
static int tokudb_discover_table_existence(handlerton *hton, const char *db, const char *name) {
uchar *frmblob = 0;
size_t frmlen;
int res= tokudb_discover(hton, current_thd, db, name, &frmblob, &frmlen);
my_free(frmblob);
return res != ENOENT;
}
#endif
static int tokudb_discover(handlerton *hton, THD* thd, const char *db, const char *name, uchar **frmblob, size_t *frmlen) {
return tokudb_discover2(hton, thd, db, name, true, frmblob, frmlen);
}
static int tokudb_discover2(handlerton *hton, THD* thd, const char *db, const char *name, bool translate_name,
uchar **frmblob, size_t *frmlen) {
TOKUDB_DBUG_ENTER("%s %s", db, name);
char path[FN_REFLEN + 1];
build_table_filename(path, sizeof(path) - 1, db, name, "", translate_name ? 0 : FN_IS_TMP);
return tokudb_discover3(hton, thd, db, name, path, frmblob, frmlen);
}
static int tokudb_discover3(handlerton *hton, THD* thd, const char *db, const char *name, char *path,
uchar **frmblob, size_t *frmlen) {
TOKUDB_DBUG_ENTER("%s %s %s", db, name, path);
int error;
DB* status_db = NULL;
DB_TXN* txn = NULL;
char path[FN_REFLEN + 1];
HA_METADATA_KEY curr_key = hatoku_frm_data;
DBT key, value;
memset(&key, 0, sizeof(key));
    memset(&value, 0, sizeof(value));
bool do_commit;
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099
tokudb_trx_data *trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);
if (thd_sql_command(thd) == SQLCOM_CREATE_TABLE && trx && trx->sub_sp_level) {
do_commit = false;
txn = trx->sub_sp_level;
} else {
error = txn_begin(db_env, 0, &txn, 0, thd);
if (error) { goto cleanup; }
do_commit = true;
}
#else
error = txn_begin(db_env, 0, &txn, 0, thd);
if (error) { goto cleanup; }
do_commit = true;
#endif
build_table_filename(path, sizeof(path) - 1, db, name, "", translate_name ? 0 : FN_IS_TMP);
error = open_status_dictionary(&status_db, path, txn);
if (error) { goto cleanup; }
......@@ -967,7 +1027,7 @@ cleanup:
if (status_db) {
status_db->close(status_db,0);
}
if (txn) {
if (do_commit && txn) {
commit_txn(txn, 0);
}
TOKUDB_DBUG_RETURN(error);
......
......@@ -88,8 +88,8 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#ifndef _HATOKU_HTON
#define _HATOKU_HTON
#ifndef _HATOKU_HTON_H
#define _HATOKU_HTON_H
#include "db.h"
......@@ -108,6 +108,100 @@ enum srv_row_format_enum {
};
typedef enum srv_row_format_enum srv_row_format_t;
static inline srv_row_format_t toku_compression_method_to_row_format(toku_compression_method method) {
switch (method) {
case TOKU_NO_COMPRESSION:
return SRV_ROW_FORMAT_UNCOMPRESSED;
case TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD:
case TOKU_ZLIB_METHOD:
return SRV_ROW_FORMAT_ZLIB;
case TOKU_QUICKLZ_METHOD:
return SRV_ROW_FORMAT_QUICKLZ;
case TOKU_LZMA_METHOD:
return SRV_ROW_FORMAT_LZMA;
case TOKU_DEFAULT_COMPRESSION_METHOD:
return SRV_ROW_FORMAT_DEFAULT;
case TOKU_FAST_COMPRESSION_METHOD:
return SRV_ROW_FORMAT_FAST;
case TOKU_SMALL_COMPRESSION_METHOD:
return SRV_ROW_FORMAT_SMALL;
default:
assert(0);
}
}
static inline toku_compression_method row_format_to_toku_compression_method(srv_row_format_t row_format) {
switch (row_format) {
case SRV_ROW_FORMAT_UNCOMPRESSED:
return TOKU_NO_COMPRESSION;
case SRV_ROW_FORMAT_QUICKLZ:
case SRV_ROW_FORMAT_FAST:
return TOKU_QUICKLZ_METHOD;
case SRV_ROW_FORMAT_ZLIB:
case SRV_ROW_FORMAT_DEFAULT:
return TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD;
case SRV_ROW_FORMAT_LZMA:
case SRV_ROW_FORMAT_SMALL:
return TOKU_LZMA_METHOD;
default:
assert(0);
}
}
static inline enum row_type row_format_to_row_type(srv_row_format_t row_format) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
switch (row_format) {
case SRV_ROW_FORMAT_UNCOMPRESSED:
return ROW_TYPE_TOKU_UNCOMPRESSED;
case SRV_ROW_FORMAT_ZLIB:
return ROW_TYPE_TOKU_ZLIB;
case SRV_ROW_FORMAT_QUICKLZ:
return ROW_TYPE_TOKU_QUICKLZ;
case SRV_ROW_FORMAT_LZMA:
return ROW_TYPE_TOKU_LZMA;
case SRV_ROW_FORMAT_SMALL:
return ROW_TYPE_TOKU_SMALL;
case SRV_ROW_FORMAT_FAST:
return ROW_TYPE_TOKU_FAST;
case SRV_ROW_FORMAT_DEFAULT:
return ROW_TYPE_DEFAULT;
}
#endif
return ROW_TYPE_DEFAULT;
}
static inline srv_row_format_t row_type_to_row_format(enum row_type type) {
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
switch (type) {
case ROW_TYPE_TOKU_UNCOMPRESSED:
return SRV_ROW_FORMAT_UNCOMPRESSED;
case ROW_TYPE_TOKU_ZLIB:
return SRV_ROW_FORMAT_ZLIB;
case ROW_TYPE_TOKU_QUICKLZ:
return SRV_ROW_FORMAT_QUICKLZ;
case ROW_TYPE_TOKU_LZMA:
return SRV_ROW_FORMAT_LZMA;
case ROW_TYPE_TOKU_SMALL:
return SRV_ROW_FORMAT_SMALL;
case ROW_TYPE_TOKU_FAST:
return SRV_ROW_FORMAT_FAST;
case ROW_TYPE_DEFAULT:
return SRV_ROW_FORMAT_DEFAULT;
default:
return SRV_ROW_FORMAT_DEFAULT;
}
#endif
return SRV_ROW_FORMAT_DEFAULT;
}
static inline enum row_type toku_compression_method_to_row_type(toku_compression_method method) {
return row_format_to_row_type(toku_compression_method_to_row_format(method));
}
static inline toku_compression_method row_type_to_toku_compression_method(enum row_type type) {
return row_format_to_toku_compression_method(row_type_to_row_format(type));
}
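A few concrete values implied by the conversion tables above (illustrative sanity checks; the ROW_TYPE_TOKU_* cases assume a build with TOKU_INCLUDE_ROW_TYPE_COMPRESSION). Note the round trip is lossy: FAST collapses onto QUICKLZ and SMALL onto LZMA.

// Illustrative sanity checks over the helpers above.
assert(row_type_to_toku_compression_method(ROW_TYPE_TOKU_FAST)  == TOKU_QUICKLZ_METHOD);
assert(row_type_to_toku_compression_method(ROW_TYPE_TOKU_SMALL) == TOKU_LZMA_METHOD);
assert(toku_compression_method_to_row_type(TOKU_ZLIB_METHOD)    == ROW_TYPE_TOKU_ZLIB);
assert(row_format_to_toku_compression_method(SRV_ROW_FORMAT_DEFAULT) == TOKU_ZLIB_WITHOUT_CHECKSUM_METHOD);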
// thread variables
static MYSQL_THDVAR_BOOL(commit_sync,
......@@ -338,7 +432,7 @@ static MYSQL_THDVAR_ENUM(row_format, PLUGIN_VAR_OPCMDARG,
"TOKUDB_LZMA, TOKUDB_FAST, TOKUDB_SMALL and TOKUDB_DEFAULT",
NULL, NULL, SRV_ROW_FORMAT_ZLIB, &tokudb_row_format_typelib);
static srv_row_format_t get_row_format(THD *thd) {
static inline srv_row_format_t get_row_format(THD *thd) {
return (srv_row_format_t) THDVAR(thd, row_format);
}
......