Commit e4b13a31 authored by Rich Prohaska's avatar Rich Prohaska

Merge branch 'master' into releases/tokudb-7.5

parents 1549f0bf 4c57196e
......@@ -24,14 +24,14 @@ working MySQL or MariaDB with Tokutek patches, and with the TokuDB storage
engine, called `make.mysql.bash`. This script will download copies of the
needed source code from github and build everything.
To build MySQL 5.5.39 with TokuDB 7.5.2:
```sh
scripts/make.mysql.bash --mysqlbuild=mysql-5.5.39-tokudb-7.5.2-linux-x86_64
```
To build MariaDB 5.5.39 with TokuDB 7.5.2:
```sh
scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.39-tokudb-7.5.2-linux-x86_64
```
Before you start, make sure you have a C++11-compatible compiler (GCC >=
......
drop table if exists t1,t2;
CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
CREATE TABLE t2(`a` INT) ENGINE=InnoDB;
begin;
insert into t1 values (0);
insert into t2 values (0);
commit;
begin;
insert into t1 values (1);
insert into t2 values (1);
commit;
include/diff_tables.inc [test.t1, test.t2]
drop table t1,t2;
# Cross-engine consistency test: insert identical rows into a TokuDB table
# and an InnoDB table inside the same transactions, then verify that both
# tables end up with identical contents.
source include/have_tokudb.inc;
source include/have_innodb.inc;
# Cleanup from any previous run; suppress "unknown table" warnings.
disable_warnings;
drop table if exists t1,t2;
enable_warnings;
CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
CREATE TABLE t2(`a` INT) ENGINE=InnoDB;
# Insert the values 0 and 1 into both tables, one explicit transaction per row.
let $n=0;
while ($n < 2) {
begin;
eval insert into t1 values ($n);
eval insert into t2 values ($n);
commit;
inc $n;
}
# diff_tables.inc fails the test if the two tables differ.
let $diff_tables= test.t1, test.t2;
source include/diff_tables.inc;
drop table t1,t2;
......@@ -4485,6 +4485,11 @@ int ha_tokudb::index_init(uint keynr, bool sorted) {
}
tokudb_active_index = keynr;
#if TOKU_CLUSTERING_IS_COVERING
if (keynr < table->s->keys && table->key_info[keynr].option_struct->clustering)
key_read = false;
#endif
last_cursor_error = 0;
range_lock_grabbed = false;
range_lock_grabbed_null = false;
......@@ -5834,11 +5839,14 @@ void ha_tokudb::position(const uchar * record) {
// 0, always success
//
int ha_tokudb::info(uint flag) {
TOKUDB_HANDLER_DBUG_ENTER("%d %lld", flag, (long long) share->rows);
int error;
TOKUDB_HANDLER_DBUG_ENTER("%d", flag);
int error = 0;
#if TOKU_CLUSTERING_IS_COVERING
for (uint i=0; i < table->s->keys; i++)
if (key_is_clustering(&table->key_info[i]))
table->covering_keys.set_bit(i);
#endif
DB_TXN* txn = NULL;
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
DB_BTREE_STAT64 dict_stats;
if (flag & HA_STATUS_VARIABLE) {
// Just to get optimizations right
stats.records = share->rows + share->rows_from_locked_table;
......@@ -5868,18 +5876,12 @@ int ha_tokudb::info(uint flag) {
else {
goto cleanup;
}
error = share->file->get_fragmentation(
share->file,
&frag_info
);
error = share->file->get_fragmentation(share->file, &frag_info);
if (error) { goto cleanup; }
stats.delete_length = frag_info.unused_bytes;
error = share->file->stat64(
share->file,
txn,
&dict_stats
);
DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
if (error) { goto cleanup; }
stats.create_time = dict_stats.bt_create_time_sec;
......@@ -5915,6 +5917,7 @@ int ha_tokudb::info(uint flag) {
//
// this solution is much simpler than trying to maintain an
// accurate number of valid keys at the handlerton layer.
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint i = 0; i < curr_num_DBs; i++) {
// skip the primary key, skip dropped indexes
if (i == primary_key || share->key_file[i] == NULL) {
......@@ -6365,10 +6368,12 @@ static toku_compression_method get_compression_method(DB *file) {
return method;
}
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
// Map the compression method configured on this table's main dictionary
// to the MySQL row_type reported to the server (e.g. for SHOW CREATE TABLE).
enum row_type ha_tokudb::get_row_type(void) const {
    return toku_compression_method_to_row_type(get_compression_method(share->file));
}
#endif
static int create_sub_table(
const char *table_name,
......@@ -6444,16 +6449,16 @@ void ha_tokudb::update_create_info(HA_CREATE_INFO* create_info) {
create_info->auto_increment_value = stats.auto_increment_value;
}
}
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
if (!(create_info->used_fields & HA_CREATE_USED_ROW_FORMAT)) {
// show create table asks us to update this create_info, this makes it
// so we'll always show what compression type we're using
create_info->row_type = get_row_type();
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
if (create_info->row_type == ROW_TYPE_TOKU_ZLIB && THDVAR(ha_thd(), hide_default_row_format) != 0) {
create_info->row_type = ROW_TYPE_DEFAULT;
}
#endif
}
#endif
}
//
......@@ -6778,6 +6783,14 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
memset(&kc_info, 0, sizeof(kc_info));
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100999
// TokuDB does not support discover_table_names() and writes no files
// in the database directory, so automatic filename-based
// discover_table_names() doesn't work either. So, it must force .frm
// file to disk.
form->s->write_frm_image();
#endif
#if TOKU_INCLUDE_OPTION_STRUCTS
const srv_row_format_t row_format = (srv_row_format_t) form->s->option_struct->row_format;
#else
......
......@@ -756,9 +756,9 @@ class ha_tokudb : public handler {
uchar* buf,
DBT* key_to_compare
);
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
enum row_type get_row_type() const;
#endif
private:
int read_full_row(uchar * buf);
int __close();
......
......@@ -113,6 +113,7 @@ PATENT RIGHTS GRANT:
#endif
#define TOKU_INCLUDE_OPTION_STRUCTS 1
#define TOKU_OPTIMIZE_WITH_RECREATE 1
#define TOKU_CLUSTERING_IS_COVERING 1
#elif 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
// mysql 5.7 with no patches
......
......@@ -1560,7 +1560,7 @@ static int tokudb_file_map_fill_table(THD *thd, TABLE_LIST *tables, COND *cond)
} else {
error = tokudb_file_map(table, thd);
if (error)
my_error(ER_GET_ERRNO, MYF(0), error);
my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
}
rw_unlock(&tokudb_hton_initialized_lock);
......@@ -1709,7 +1709,7 @@ static int tokudb_fractal_tree_info_fill_table(THD *thd, TABLE_LIST *tables, CON
} else {
error = tokudb_fractal_tree_info(table, thd);
if (error)
my_error(ER_GET_ERRNO, MYF(0), error);
my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
}
//3938: unlock the status flag lock
......@@ -1924,7 +1924,7 @@ static int tokudb_fractal_tree_block_map_fill_table(THD *thd, TABLE_LIST *tables
} else {
error = tokudb_fractal_tree_block_map(table, thd);
if (error)
my_error(ER_GET_ERRNO, MYF(0), error);
my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
}
//3938: unlock the status flag lock
......@@ -2080,7 +2080,7 @@ static int tokudb_trx_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
struct tokudb_trx_extra e = { thd, tables->table };
error = db_env->iterate_live_transactions(db_env, tokudb_trx_callback, &e);
if (error)
my_error(ER_GET_ERRNO, MYF(0), error);
my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
}
rw_unlock(&tokudb_hton_initialized_lock);
......@@ -2167,7 +2167,7 @@ static int tokudb_lock_waits_fill_table(THD *thd, TABLE_LIST *tables, COND *cond
struct tokudb_lock_waits_extra e = { thd, tables->table };
error = db_env->iterate_pending_lock_requests(db_env, tokudb_lock_waits_callback, &e);
if (error)
my_error(ER_GET_ERRNO, MYF(0), error);
my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
}
rw_unlock(&tokudb_hton_initialized_lock);
......@@ -2258,7 +2258,7 @@ static int tokudb_locks_fill_table(THD *thd, TABLE_LIST *tables, COND *cond) {
struct tokudb_locks_extra e = { thd, tables->table };
error = db_env->iterate_live_transactions(db_env, tokudb_locks_callback, &e);
if (error)
my_error(ER_GET_ERRNO, MYF(0), error);
my_error(ER_GET_ERRNO, MYF(0), error, tokudb_hton_name);
}
rw_unlock(&tokudb_hton_initialized_lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment