Commit faa4d8f8 authored by Sergei Petrunia

Copy of

commit de1e8c7bfe7c875ea284b55040e8f3cd3a56fcc2
Author: Abhinav Sharma <abhinavsharma@fb.com>
Date:   Thu Aug 23 14:34:39 2018 -0700

    Log updates to semi-sync whitelist in the error log

    Summary:
    Plugin variable changes are not logged in the error log even when
log_global_var_changes is enabled. Logging updates to the whitelist will help in
    debugging.

    Reviewed By: guokeno0

    Differential Revision: D9483807

    fbshipit-source-id: e111cda773d
parent 445e518b
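
A minimal sketch of the idea behind the copied commit, assuming the usual plugin system-variable update hook; the function name and message text are hypothetical, and sql_print_information / st_mysql_sys_var are taken from the server headers, which are assumed to be included. This is not code from the commit, only an illustration of "log the whitelist change explicitly because log_global_var_changes skips plugin variables":

/* Hypothetical sketch, not code from this commit. */
static void update_semi_sync_whitelist(MYSQL_THD thd,
                                       struct st_mysql_sys_var *var,
                                       void *var_ptr, const void *save) {
  const char *new_val = *static_cast<const char *const *>(save);
  *static_cast<const char **>(var_ptr) = new_val;  /* apply the new value */
  /* log_global_var_changes does not cover plugin variables, so record the
     change in the error log explicitly for debugging. */
  sql_print_information("Semi-sync whitelist set to: %s",
                        new_val ? new_val : "(null)");
}
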
......@@ -82,32 +82,43 @@ ENDIF()
SET(rocksdb_static_libs )
IF (WITH_SNAPPY)
FIND_LIBRARY(SNAPPY_LIBRARY
NAMES libsnappy${PIC_EXT}.a snappy
HINTS ${WITH_SNAPPY}/lib)
SET(rocksdb_static_libs ${rocksdb_static_libs}
${WITH_SNAPPY}/lib/libsnappy${PIC_EXT}.a)
${SNAPPY_LIBRARY})
ADD_DEFINITIONS(-DSNAPPY)
ENDIF()
IF (WITH_LZ4)
FIND_LIBRARY(LZ4_LIBRARY
NAMES liblz4${PIC_EXT}.a lz4
HINTS ${WITH_LZ4}/lib)
SET(rocksdb_static_libs ${rocksdb_static_libs}
${WITH_LZ4}/lib/liblz4${PIC_EXT}.a)
${LZ4_LIBRARY})
ADD_DEFINITIONS(-DLZ4)
ENDIF()
IF (WITH_BZ2)
FIND_LIBRARY(BZ2_LIBRARY
NAMES libbz2${PIC_EXT}.a bz2
HINTS ${WITH_BZ2}/lib)
SET(rocksdb_static_libs ${rocksdb_static_libs}
${WITH_BZ2}/lib/libbz2${PIC_EXT}.a)
${BZ2_LIBRARY})
ADD_DEFINITIONS(-DBZIP2)
ENDIF()
IF (WITH_ZSTD)
SET(rocksdb_static_libs ${rocksdb_static_libs}
${WITH_ZSTD}/lib/libzstd${PIC_EXT}.a)
SET(rocksdb_static_libs ${rocksdb_static_libs} ${ZSTD_LIBRARY})
ADD_DEFINITIONS(-DZSTD)
ENDIF()
IF (WITH_TBB)
FIND_LIBRARY(TBB_LIBRARY
NAMES libtbb${PIC_EXT}.a tbb
HINTS ${WITH_TBB}/lib)
SET(rocksdb_static_libs ${rocksdb_static_libs}
${WITH_TBB}/lib/libtbb${PIC_EXT}.a)
${TBB_LIBRARY})
ADD_DEFINITIONS(-DTBB)
ENDIF()
......
......@@ -104,6 +104,7 @@ struct Rdb_deadlock_info {
std::string table_name;
};
std::vector <Rdb_dl_trx_info> path;
int64_t deadlock_time;
ulonglong victim_trx_id;
};
......@@ -144,6 +145,11 @@ const char *const BG_THREAD_NAME = "myrocks-bg";
*/
const char *const INDEX_THREAD_NAME = "myrocks-index";
/*
Name for the manual compaction thread.
*/
const char *const MANUAL_COMPACTION_THREAD_NAME = "myrocks-mc";
/*
Separator between partition name and the qualifier. Sample usage:
......@@ -485,6 +491,12 @@ class ha_rocksdb : public my_core::handler {
const rocksdb::Snapshot *m_scan_it_snapshot;
/* Buffers used for upper/lower bounds for m_scan_it. */
uchar *m_scan_it_lower_bound;
uchar *m_scan_it_upper_bound;
rocksdb::Slice m_scan_it_lower_bound_slice;
rocksdb::Slice m_scan_it_upper_bound_slice;
Rdb_tbl_def *m_tbl_def;
/* Primary Key encoder from KeyTupleFormat to StorageFormat */
......@@ -548,12 +560,6 @@ class ha_rocksdb : public my_core::handler {
uchar *m_dup_sk_packed_tuple;
uchar *m_dup_sk_packed_tuple_old;
/* Buffers used for passing upper/bound eq conditions. */
uchar *m_eq_cond_lower_bound;
uchar *m_eq_cond_upper_bound;
rocksdb::Slice m_eq_cond_lower_bound_slice;
rocksdb::Slice m_eq_cond_upper_bound_slice;
/*
Temporary space for packing VARCHARs (we provide it to
pack_record()/pack_index_tuple() calls).
......@@ -635,13 +641,20 @@ class ha_rocksdb : public my_core::handler {
enum ha_rkey_function find_flag) const
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
void setup_iterator_bounds(const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond);
const rocksdb::Slice &eq_cond, size_t bound_len,
uchar *const lower_bound, uchar *const upper_bound,
rocksdb::Slice *lower_bound_slice,
rocksdb::Slice *upper_bound_slice);
bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond,
const bool use_all_keys);
bool check_bloom_and_set_bounds(THD *thd, const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond,
const bool use_all_keys);
const bool use_all_keys, size_t bound_len,
uchar *const lower_bound,
uchar *const upper_bound,
rocksdb::Slice *lower_bound_slice,
rocksdb::Slice *upper_bound_slice);
void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *slice,
const bool use_all_keys, const uint eq_cond_len)
MY_ATTRIBUTE((__nonnull__));
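
A hypothetical call site, shown only to illustrate the reworked signatures in the hunk above; thd, kd, eq_cond, use_all_keys and bound_len are assumed to be in scope, and the real callers live in ha_rocksdb.cc:

// Hypothetical usage sketch: the caller now owns the bound buffers, so each
// iterator keeps its own lower/upper bound instead of the removed
// m_eq_cond_* handler members.
rocksdb::Slice lower_slice, upper_slice;
const bool use_bloom =
    check_bloom_and_set_bounds(thd, kd, eq_cond, use_all_keys,
                               bound_len /* size of both buffers, assumed */,
                               m_scan_it_lower_bound, m_scan_it_upper_bound,
                               &lower_slice, &upper_slice);
// On return the slices point into the buffers and can presumably be handed to
// RocksDB's iterate_lower_bound / iterate_upper_bound read options.
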
......@@ -834,7 +847,7 @@ class ha_rocksdb : public my_core::handler {
HA_REC_NOT_IN_SEQ | HA_CAN_INDEX_BLOBS |
(m_pk_can_be_decoded ? HA_PRIMARY_KEY_IN_READ_INDEX : 0) |
HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | HA_NULL_IN_KEY |
HA_PARTIAL_COLUMN_READ);
HA_PARTIAL_COLUMN_READ | HA_ONLINE_ANALYZE);
}
bool init_with_fields() override;
......@@ -1009,6 +1022,7 @@ class ha_rocksdb : public my_core::handler {
}
virtual double read_time(uint, uint, ha_rows rows) override;
virtual void print_error(int error, myf errflag) override;
int open(const char *const name, int mode, uint test_if_locked) override
MY_ATTRIBUTE((__warn_unused_result__));
......@@ -1123,7 +1137,7 @@ class ha_rocksdb : public my_core::handler {
MY_ATTRIBUTE((__nonnull__));
int compare_key_parts(const KEY *const old_key,
const KEY *const new_key) const;
const KEY *const new_key) const
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int compare_keys(const KEY *const old_key, const KEY *const new_key) const
......@@ -1180,7 +1194,7 @@ class ha_rocksdb : public my_core::handler {
int update_pk(const Rdb_key_def &kd, const struct update_row_info &row_info,
const bool &pk_changed) MY_ATTRIBUTE((__warn_unused_result__));
int update_sk(const TABLE *const table_arg, const Rdb_key_def &kd,
const struct update_row_info &row_info)
const struct update_row_info &row_info, const bool bulk_load_sk)
MY_ATTRIBUTE((__warn_unused_result__));
int update_indexes(const struct update_row_info &row_info,
const bool &pk_changed)
......@@ -1234,7 +1248,9 @@ class ha_rocksdb : public my_core::handler {
int finalize_bulk_load(bool print_client_error = true)
MY_ATTRIBUTE((__warn_unused_result__));
public:
int calculate_stats_for_table() MY_ATTRIBUTE((__warn_unused_result__));
public:
int index_init(uint idx, bool sorted) override
MY_ATTRIBUTE((__warn_unused_result__));
int index_end() override MY_ATTRIBUTE((__warn_unused_result__));
......@@ -1327,9 +1343,6 @@ class ha_rocksdb : public my_core::handler {
MY_ATTRIBUTE((__warn_unused_result__));
int analyze(THD *const thd, HA_CHECK_OPT *const check_opt) override
MY_ATTRIBUTE((__warn_unused_result__));
int calculate_stats(const TABLE *const table_arg, THD *const thd,
HA_CHECK_OPT *const check_opt)
MY_ATTRIBUTE((__warn_unused_result__));
enum_alter_inplace_result check_if_supported_inplace_alter(
TABLE *altered_table,
......@@ -1356,7 +1369,7 @@ class ha_rocksdb : public my_core::handler {
virtual void rpl_after_delete_rows() override;
virtual void rpl_before_update_rows() override;
virtual void rpl_after_update_rows() override;
virtual bool use_read_free_rpl();
virtual bool use_read_free_rpl() override;
private:
/* Flags tracking if we are inside different replication operation */
......
......@@ -39,7 +39,12 @@ enum RDB_IO_ERROR_TYPE {
const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type);
void rdb_handle_io_error(const rocksdb::Status status,
const RDB_IO_ERROR_TYPE err_type);
const RDB_IO_ERROR_TYPE err_type)
#if defined(__clang__)
MY_ATTRIBUTE((optnone));
#else
MY_ATTRIBUTE((optimize("O0")));
#endif
int rdb_normalize_tablename(const std::string &tablename, std::string *str)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
......
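
The hunk above disables the optimizer for rdb_handle_io_error, and the #if is needed because the two compilers spell that attribute differently (clang understands optnone, gcc the optimize("O0") form). A hypothetical macro-based variant of the same pattern, with RDB_NO_OPTIMIZE being an invented name, not something from the tree:

#if defined(__clang__)
#define RDB_NO_OPTIMIZE MY_ATTRIBUTE((optnone))
#else
#define RDB_NO_OPTIMIZE MY_ATTRIBUTE((optimize("O0")))
#endif

/* Same declaration as above, written with the hypothetical macro. */
void rdb_handle_io_error(const rocksdb::Status status,
                         const RDB_IO_ERROR_TYPE err_type) RDB_NO_OPTIMIZE;
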
[write_committed]
rocksdb_write_policy=write_committed
[write_prepared]
rocksdb_write_policy=write_prepared
rocksdb_commit_time_batch_for_recovery=on
if (`select count(*) = 0 from information_schema.session_variables where variable_name = 'rocksdb_write_policy' and variable_value = 'write_committed';`) {
--skip Test requires write_committed policy
}
......@@ -287,11 +287,13 @@ set global rocksdb_bulk_load=1;
show global variables like 'rocksdb_bulk_load%';
Variable_name Value
rocksdb_bulk_load ON
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
show session variables like 'rocksdb_bulk_load%';
Variable_name Value
rocksdb_bulk_load ON
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
......@@ -335,6 +337,7 @@ SET session rocksdb_merge_buf_size = 340;
show variables like 'rocksdb_bulk_load%';
Variable_name Value
rocksdb_bulk_load OFF
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB;
......@@ -442,3 +445,24 @@ t1 CREATE TABLE `t1` (
KEY `kb` (`b`(8))
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
DROP TABLE t1;
SET @prior_rocksdb_table_stats_sampling_pct = @@rocksdb_table_stats_sampling_pct;
set global rocksdb_table_stats_sampling_pct = 100;
CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 10);
INSERT INTO t1 (a, b) VALUES (2, 10);
INSERT INTO t1 (a, b) VALUES (3, 20);
INSERT INTO t1 (a, b) VALUES (4, 20);
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SHOW INDEX in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 4 NULL NULL LSMTREE
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
SHOW INDEX in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 4 NULL NULL LSMTREE
t1 1 kb 1 b A 2 NULL NULL YES LSMTREE
DROP TABLE t1;
SET global rocksdb_table_stats_sampling_pct = @prior_rocksdb_table_stats_sampling_pct;
......@@ -15,7 +15,7 @@ count(b)
3000000
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Failed to acquire lock due to max_num_locks limit
ERROR HY000: Got error 10 'Operation aborted: Failed to acquire lock due to max_num_locks limit' from ROCKSDB
set session rocksdb_bulk_load=1;
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
set session rocksdb_bulk_load=0;
......
......@@ -262,3 +262,28 @@ SELECT * FROM t1;
a b
36 foo
DROP TABLE t1;
#
# Issue #834/MDEV-15304 ALTER TABLE table_with_hidden_pk causes Can't
# write; duplicate key in table error and/or crash
#
CREATE TABLE t1 (a INT, KEY(a)) ENGINE=RocksDB;
INSERT INTO t1 VALUES (1),(1+1);
create table t2 (a int);
insert into t2 values (10),(20),(30);
BEGIN;
select * from t2;
a
10
20
30
alter table t1 force;
select * from t1;
a
insert into t1 values (100);
select * from t1;
a
1
2
100
rollback;
drop table t1,t2;
......@@ -59,12 +59,10 @@ insert into t values ();
set debug="+d,crash_commit_before";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 4
select max(i) from t;
max(i)
3
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
# After engine prepare
begin;
insert into t values ();
......@@ -72,12 +70,10 @@ insert into t values ();
set debug="+d,crash_commit_after_prepare";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 4
select max(i) from t;
max(i)
3
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
# After binlog
begin;
insert into t values ();
......@@ -85,12 +81,10 @@ insert into t values ();
set debug="+d,crash_commit_after_log";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 6
select max(i) from t;
max(i)
5
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
# After everything
begin;
insert into t values ();
......@@ -98,10 +92,8 @@ insert into t values ();
set debug="+d,crash_commit_after";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 8
select max(i) from t;
max(i)
7
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
drop table t;
......@@ -141,3 +141,21 @@ SELECT * FROM t1;
a b
18446744073709551613 a
DROP TABLE t1;
#----------------------------------
# Issue #792 Crash in autoincrement
#----------------------------------
CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY,C2 CHAR) ENGINE=ROCKSDB;
INSERT INTO t1 VALUES(2177,0);
DROP TABLE t1;
CREATE TABLE t0(c0 BLOB) ENGINE=ROCKSDB;
INSERT INTO t0 VALUES(0);
ALTER TABLE t0 AUTO_INCREMENT=0;
DROP TABLE t0;
#----------------------------------
# Issue #869 Crash in autoincrement
#----------------------------------
CREATE TABLE t1 (pk INT AUTO_INCREMENT, a INT, PRIMARY KEY(pk)) ENGINE=RocksDB;
INSERT INTO t1 (a) VALUES (1);
UPDATE t1 SET pk = 3;
ALTER TABLE t1 AUTO_INCREMENT 2;
DROP TABLE t1;
#
# Issue #809: Wrong query result with bloom filters
#
create table t1 (
id1 bigint not null,
id2 bigint not null,
id3 varchar(100) not null,
id4 int not null,
id5 int not null,
value bigint,
value2 varchar(100),
primary key (id1, id2, id3, id4) COMMENT 'rev:bf5_1'
) engine=ROCKSDB;
create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t3(seq int);
insert into t3
select
1+ A.a + B.a* 10 + C.a * 100 + D.a * 1000
from t2 A, t2 B, t2 C, t2 D;
insert t1
select
(seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc"
from t3;
set global rocksdb_force_flush_memtable_now=1;
# Full table scan
explain
select * from t1 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10000 NULL
select * from t1 limit 10;
id1 id2 id3 id4 id5 value value2
1000 2000 2000 10000 10000 1000 aaabbbccc
1000 2000 2000 9999 9999 1000 aaabbbccc
1000 2000 2000 9998 9998 1000 aaabbbccc
1000 2000 2000 9997 9997 1000 aaabbbccc
1000 2000 2000 9996 9996 1000 aaabbbccc
1000 1999 1999 9995 9995 1000 aaabbbccc
1000 1999 1999 9994 9994 1000 aaabbbccc
1000 1999 1999 9993 9993 1000 aaabbbccc
1000 1999 1999 9992 9992 1000 aaabbbccc
1000 1999 1999 9991 9991 1000 aaabbbccc
# An index scan starting from the end of the table:
explain
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 122 NULL 1 NULL
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;
id1 id2 id3 id4 id5 value value2
1000 2000 2000 10000 10000 1000 aaabbbccc
create table t4 (
pk int unsigned not null primary key,
kp1 int unsigned not null,
kp2 int unsigned not null,
col1 int unsigned,
key(kp1, kp2) comment 'rev:bf5_2'
) engine=rocksdb;
insert into t4 values (1, 0xFFFF, 0xFFF, 12345);
# This must not fail an assert:
select * from t4 force index(kp1) where kp1=0xFFFFFFFF and kp2<=0xFFFFFFFF order by kp2 desc;
pk kp1 kp2 col1
drop table t1,t2,t3,t4;
create table r1 (id bigint primary key, value bigint) engine=rocksdb;
create table r2 (id bigint, value bigint, primary key (id) comment 'cf2') engine=rocksdb;
set session rocksdb_bulk_load=1;
set session rocksdb_bulk_load=0;
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r1 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
variable_value-@h
1
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r2 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
variable_value-@h
0
DROP TABLE r1, r2;
SET rocksdb_bulk_load_size=15;
CREATE TABLE t4 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t3 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t2 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t1 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
SET rocksdb_bulk_load=1;
INSERT INTO t1 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t1 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t1 FORCE INDEX (b);
count(*)
10
SELECT count(*) FROM t1 FORCE INDEX (c);
count(*)
10
SET rocksdb_bulk_load=0;
SELECT * FROM t1 FORCE INDEX (PRIMARY);
a b c
-9 11 11
-7 9 9
-5 7 7
-3 5 5
-1 3 3
2 0 0
4 -2 -2
6 -4 -4
8 -6 -6
10 -8 -8
SELECT b FROM t1 FORCE INDEX (b);
b
-8
-6
-4
-2
0
3
5
7
9
11
SELECT c FROM t1 FORCE INDEX (c);
c
-8
-6
-4
-2
0
3
5
7
9
11
Checksums should match
CHECKSUM TABLE t3;
Table Checksum
test.t3 3862424802
CHECKSUM TABLE t1;
Table Checksum
test.t1 3862424802
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t4 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t4 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t4 FORCE INDEX (b);
count(*)
0
SELECT count(*) FROM t4 FORCE INDEX (c);
count(*)
0
SET rocksdb_bulk_load=0;
SELECT * FROM t4 FORCE INDEX (PRIMARY);
a b c
-9 11 11
-7 9 9
-5 7 7
-3 5 5
-1 3 3
2 0 0
4 -2 -2
6 -4 -4
8 -6 -6
10 -8 -8
SELECT b FROM t4 FORCE INDEX (b);
b
-8
-6
-4
-2
0
3
5
7
9
11
SELECT c FROM t4 FORCE INDEX (c);
c
-8
-6
-4
-2
0
3
5
7
9
11
Checksums should match
CHECKSUM TABLE t3;
Table Checksum
test.t3 3862424802
CHECKSUM TABLE t4;
Table Checksum
test.t4 3862424802
SET rocksdb_bulk_load_allow_unsorted=1;
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t2 SELECT * FROM t3 WHERE b >= 0 ORDER BY b;
INSERT INTO t2 SELECT * FROM t3 WHERE b < 0 ORDER BY b;
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (b);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (c);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (b);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (c);
count(*)
0
SET rocksdb_bulk_load=0;
SELECT * FROM t2 FORCE INDEX (PRIMARY);
a b c
-19 21 21
-17 19 19
-15 17 17
-13 15 15
-11 13 13
-9 11 11
-7 9 9
-5 7 7
-3 5 5
-1 3 3
2 0 0
4 -2 -2
6 -4 -4
8 -6 -6
10 -8 -8
12 -10 -10
14 -12 -12
16 -14 -14
18 -16 -16
20 -18 -18
SELECT b FROM t2 FORCE INDEX (b);
b
-18
-16
-14
-12
-10
-8
-6
-4
-2
0
3
5
7
9
11
13
15
17
19
21
SELECT c FROM t2 FORCE INDEX (c);
c
-18
-16
-14
-12
-10
-8
-6
-4
-2
0
3
5
7
9
11
13
15
17
19
21
Checksums should match
CHECKSUM TABLE t3;
Table Checksum
test.t3 1495594118
CHECKSUM TABLE t2;
Table Checksum
test.t2 1495594118
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
DROP TABLE t4;
......@@ -82,4 +82,19 @@ t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE
SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
table_name table_rows
t1 100000
drop table t1;
CREATE TABLE t2 (a INT, b INT, c INT, d INT, e INT, f INT, g INT,
PRIMARY KEY (a), KEY (c, b, a, d, e, f, g))
ENGINE=ROCKSDB;
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
test.t2 analyze status OK
cardinality of the columns after 'a' must be equal to the cardinality of column 'a'
SELECT CARDINALITY INTO @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND COLUMN_NAME='a';
SELECT COLUMN_NAME, CARDINALITY = @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND SEQ_IN_INDEX > 3;
COLUMN_NAME CARDINALITY = @c
d 1
e 1
f 1
g 1
drop table t1, t2;
SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
DROP TABLE IF EXISTS t1;
call mtr.add_suppression("Invalid pattern");
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
ALTER TABLE t1 ADD INDEX (value);
ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE t1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
......@@ -13,6 +14,7 @@ SET GLOBAL rocksdb_strict_collation_check=1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8;
DROP TABLE t1;
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin;
DROP TABLE t1;
......@@ -126,4 +128,16 @@ CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=r
ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE abc;
SET GLOBAL rocksdb_strict_collation_exceptions=null;
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
SET GLOBAL rocksdb_strict_collation_check=1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
Warnings:
Warning 1210 Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE t1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
ALTER TABLE t1 ADD INDEX (value);
Warnings:
Warning 1210 Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE t1;
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
CREATE DATABASE db_rpc;
USE db_rpc;
CREATE TABLE t1(pk INT PRIMARY KEY) ENGINE=rocksdb;
SET GLOBAL rocksdb_enable_2pc=1;
SET autocommit = 0;
SET autocommit = 0;
BEGIN;
BEGIN;
SELECT * from t1;
pk
SELECT * from t1;
pk
INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
COMMIT;
COMMIT;
SELECT * from db_rpc.t1;
pk
1
2
DROP DATABASE db_rpc;
USE mysql;
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
CREATE TABLE test.mysql_table (a INT) ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
USE test;
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE IF NOT EXISTS mysql_table_2 (a INT) ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql_table_no_cols ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql.mysql_table_2 (a INT) ENGINE=ROCKSDB;
CREATE TABLE mysql_primkey (a INT PRIMARY KEY, b INT, c INT, d INT, INDEX (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey DROP b, DROP a, ADD (f INT PRIMARY KEY);
ALTER TABLE mysql_primkey DROP PRIMARY KEY;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql_primkey2 (a INT PRIMARY KEY, b INT, c INT) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey2 DROP b;
ALTER TABLE mysql_primkey2 ADD (b INT);
ALTER TABLE mysql_primkey2 DROP c, DROP A;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql_primkey3 (a INT PRIMARY KEY, b INT, c INT, INDEX indexonb (b), INDEX indexonc (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey3 DROP INDEX indexonb;
ALTER TABLE mysql_primkey3 DROP c;
ALTER TABLE mysql_primkey3 DROP PRIMARY KEY, ADD PRIMARY KEY(b);
CREATE TABLE mysql_primkey4(a INT, b INT, PRIMARY KEY(a), INDEX si (a, b)) ENGINE=ROCKSDB;
DROP INDEX si ON mysql_primkey4;
DROP INDEX `PRIMARY` ON mysql_primkey4;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
ALTER TABLE mysql.mysql_table ADD PRIMARY KEY (a);
ALTER TABLE mysql.mysql_table DROP PRIMARY KEY;
DROP TABLE mysql_primkey;
DROP TABLE mysql_primkey2;
DROP TABLE mysql_primkey3;
DROP TABLE mysql_primkey4;
USE mysql;
DROP TABLE mysql_table;
DROP TABLE mysql_table_2;
......@@ -45,7 +45,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
alter table t1 modify i bigint;;
set high_priority_ddl = 0;
......@@ -98,7 +98,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
alter table t1 rename t1_new;;
set high_priority_ddl = 0;
......@@ -152,7 +152,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop table t1;;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
......@@ -202,7 +202,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop table t1;;
set high_priority_ddl = 0;
......@@ -251,7 +251,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
alter table t1 modify i bigint;;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
......@@ -302,7 +302,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
create index idx1 on t1 (i);;
set high_priority_ddl = 0;
......@@ -342,7 +342,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop index idx1 on t1;;
set high_priority_ddl = 0;
......@@ -390,7 +390,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
truncate t1;;
set high_priority_ddl = 0;
......@@ -438,7 +438,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
create trigger ins_sum before insert on t1 for each row set @sum = @sum + new.i;;
set high_priority_ddl = 0;
......@@ -478,7 +478,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop trigger ins_sum;;
set high_priority_ddl = 0;
......@@ -528,7 +528,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
optimize table t1;;
Table Op Msg_type Msg_text
......@@ -538,6 +538,55 @@ connection: default (for show processlist)
show processlist;
Id User Host db Command Time State Info Rows examined Rows sent Tid Srv_Id
<Id> root <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
## Test parameters:
## use_sys_var = 1;
## con_block = con1
## con_kill = default
## cmd = lock tables t1 write;
## high_priority_cmd = optimize high_priority table t1;
## should_kill = 1
## recreate_table = 1
## throw_error = 1
drop table if exists t1;
create table t1 (i int);
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) DEFAULT NULL
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
insert into t1 values (1), (2), (3);
connection: con1
lock tables t1 read;;
connection: default
set lock_wait_timeout = 0.02;
set high_priority_lock_wait_timeout = 0.02;
describe t1;
Field Type Null Key Default Extra
i int(11) YES NULL
connection: default (for show processlist)
# both con1 and default exist
show processlist;
Id User Host db Command Time State Info Rows examined Rows sent Tid Srv_Id
<Id> root <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
<Id> test_user1 <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
connection: default
lock tables t1 write;;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
lock tables t1 write;;
set high_priority_ddl = 0;
connection: default (for show processlist)
show processlist;
Id User Host db Command Time State Info Rows examined Rows sent Tid Srv_Id
<Id> root <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
unlock tables;
drop user test_user1@localhost;
drop user test_user2@localhost;
drop table if exists t1;
......
......@@ -53,6 +53,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -60,6 +61,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -102,6 +104,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -109,6 +112,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -120,6 +124,7 @@ TABLE NAME: test.t
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -127,6 +132,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -170,6 +176,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -177,6 +184,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -188,6 +196,7 @@ TABLE NAME: test.t
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -195,6 +204,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -206,6 +216,7 @@ TABLE NAME: test.t
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -213,6 +224,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -240,6 +252,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -247,6 +260,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -352,6 +366,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -359,6 +374,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -373,6 +389,25 @@ TABLE NAME: test.t
END OF ROCKSDB TRANSACTION MONITOR OUTPUT
=========================================
Deadlock #6
create table t1 (id int primary key, value int) engine=rocksdb;
insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5);
begin;
update t1 set value=value+100 where id=1;
update t1 set value=value+100 where id=2;
begin;
update t1 set value=value+200 where id=3;
update t1 set value=value+100 where id=3;
update t1 set value=value+200 where id=1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
select * from t1;
id value
1 101
2 102
3 103
4 4
5 5
drop table t1;
set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
drop table t;
......@@ -390,6 +425,27 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
LOCK TYPE: EXCLUSIVE
INDEX NAME: NOT FOUND; IDX_ID
TABLE NAME: NOT FOUND; IDX_ID
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
LOCK TYPE: EXCLUSIVE
INDEX NAME: NOT FOUND; IDX_ID
TABLE NAME: NOT FOUND; IDX_ID
--------TXN_ID GOT DEADLOCK---------
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -397,6 +453,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: NOT FOUND; IDX_ID
TABLE NAME: NOT FOUND; IDX_ID
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......
......@@ -43,6 +43,17 @@ DELETE FROM t4;
drop table t3;
DELETE FROM t1;
DELETE FROM t4;
SET GLOBAL rocksdb_max_manual_compactions = 2;
SET GLOBAL rocksdb_debug_manual_compaction_delay = 3600;
SET GLOBAL rocksdb_compact_cf='cf1';
SET GLOBAL rocksdb_compact_cf='rev:cf2';
select * from information_schema.global_status where variable_name='rocksdb_manual_compactions_running';
VARIABLE_NAME VARIABLE_VALUE
ROCKSDB_MANUAL_COMPACTIONS_RUNNING 1
SET GLOBAL rocksdb_compact_cf='cf1';
ERROR HY000: Internal error: Can't schedule more manual compactions. Increase rocksdb_max_manual_compactions or stop issuing more manual compactions.
SET GLOBAL rocksdb_compact_cf='rev:cf2';
ERROR HY000: Internal error: Can't schedule more manual compactions. Increase rocksdb_max_manual_compactions or stop issuing more manual compactions.
drop table t4;
CREATE TABLE t5 (
a int not null,
......
DROP TABLE IF EXISTS t1;
CREATE TABLE T1 (a INT PRIMARY KEY AUTO_INCREMENT) ENGINE=ROCKSDB;
INSERT INTO T1 VALUES();
"con1: Creating explict snapshot"
SELECT * FROM T1;
a
1
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
"con2: Attaching snapshot id 1"
ATTACH EXPLICIT ROCKSDB SNAPSHOT 1;
"con2: New row should not be visible"
SELECT * FROM T1;
a
1
"con2: Releasing snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con2: New row should be visible"
SELECT * FROM T1;
a
1
2
"con1: New row should not be visible"
SELECT * FROM T1;
a
1
"con1: Releasing snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: New row should be visible"
SELECT * FROM T1;
a
1
2
"con1: Starting shared snapshot"
SELECT * FROM T1;
a
1
2
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
"con2: Starting existing snapshot"
START TRANSACTION WITH EXISTING ROCKSDB SNAPSHOT 2;
"con2: New row should not be visible"
SELECT * FROM T1;
a
1
2
COMMIT;
"con2: New row should be visible"
SELECT * FROM T1;
a
1
2
3
COMMIT;
"con1: New row should be visible"
SELECT * FROM T1;
a
1
2
3
"con1: Creating explict snapshot"
"con2: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when an explicit snapshot is associated with the connection using CREATE|ATTACH EXPLICIT [ENGINE] SNAPSHOT
"con2: Attaching existing snapshot"
ATTACH EXPLICIT ROCKSDB SNAPSHOT 3;
"con2: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when an explicit snapshot is associated with the connection using CREATE|ATTACH EXPLICIT [ENGINE] SNAPSHOT
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Starting shared snapshot"
"con1: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT|SHARED|EXISTING [ROCKSDB] SNAPSHOT.
"con2: Starting existing snapshot"
START TRANSACTION WITH EXISTING ROCKSDB SNAPSHOT 4;
"con2: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT|SHARED|EXISTING [ROCKSDB] SNAPSHOT.
COMMIT;
COMMIT;
"con1: Creating explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
"con1: New row should not be seen"
SELECT * FROM T1;
a
1
2
3
"con1: Creating another explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Now the new row should be seen"
SELECT * FROM T1;
a
1
2
3
4
"con1: Starting transaction with consistent snapshot"
START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
5
"con1: The new row should not be seen"
SELECT * FROM T1;
a
1
2
3
4
"con1: Creating another explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
"con1: The new row should still not be seen"
SELECT * FROM T1;
a
1
2
3
4
"con1: Committing trx"
COMMIT;
"con1: The new row should now be seen because of the new explicit snapshot created above"
SELECT * FROM T1;
a
1
2
3
4
5
"con1: Releasing explicit snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Starting transaction with shared snapshot"
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
5
6
"con1: The new row should not be seen"
SELECT * FROM T1;
a
1
2
3
4
5
"con1: Starting another transaction with shared snapshot"
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
"con1: The new row should now be seen"
SELECT * FROM T1;
a
1
2
3
4
5
6
COMMIT;
"con1: Creating explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
6
"con1: Releasing explicit snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Releasing explicit snapshot again"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
ERROR HY000: Cannot process explicit snapshot
"con1: Starting transaction with shared snapshot"
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
6
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
5
6
7
"con1: Creating explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
6
"con1: Releasing explicit snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: The new row should not be seen"
SELECT* FROM T1;
a
1
2
3
4
5
6
COMMIT;
DROP TABLE T1;
......@@ -10,6 +10,7 @@ show create table information_schema.rocksdb_deadlock;
Table Create Table
ROCKSDB_DEADLOCK CREATE TEMPORARY TABLE `ROCKSDB_DEADLOCK` (
`DEADLOCK_ID` bigint(8) NOT NULL DEFAULT '0',
`TIMESTAMP` bigint(8) NOT NULL DEFAULT '0',
`TRANSACTION_ID` bigint(8) NOT NULL DEFAULT '0',
`CF_NAME` varchar(193) NOT NULL DEFAULT '',
`WAITING_KEY` varchar(513) NOT NULL DEFAULT '',
......@@ -21,7 +22,7 @@ ROCKSDB_DEADLOCK CREATE TEMPORARY TABLE `ROCKSDB_DEADLOCK` (
create table t (i int primary key) engine=rocksdb;
insert into t values (1), (2), (3);
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
Deadlock #1
begin;
select * from t where i=1 for update;
......@@ -39,9 +40,9 @@ i
2
rollback;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
Deadlock #2
begin;
select * from t where i=1 for update;
......@@ -59,11 +60,11 @@ i
2
rollback;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
set global rocksdb_max_latest_deadlocks = 10;
Deadlock #3
begin;
......@@ -82,18 +83,18 @@ i
2
rollback;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
set global rocksdb_max_latest_deadlocks = 1;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
set rocksdb_deadlock_detect_depth = 2;
Deadlock #4
begin;
......@@ -121,7 +122,7 @@ i
rollback;
set global rocksdb_max_latest_deadlocks = 5;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
Deadlock #5
begin;
select * from t where i=1 for update;
......@@ -155,18 +156,18 @@ i
rollback;
rollback;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY SHARED PRIMARY test.t 1
set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
drop table t;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE INDEX_NAME TABLE_NAME 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED INDEX_NAME TABLE_NAME 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE INDEX_NAME TABLE_NAME 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY SHARED INDEX_NAME TABLE_NAME 1
set global rocksdb_max_latest_deadlocks = 0;
# Clears deadlock buffer of any existent deadlocks.
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
......@@ -24,5 +24,8 @@ WHERE INDEX_NUMBER =
WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 4 # # # # # 4
select count(*) > 0 from information_schema.rocksdb_sst_props;
count(*) > 0
1
DROP TABLE t1;
DROP TABLE t2;
......@@ -12,6 +12,7 @@ count(*)
select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn';
CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
set global rocksdb_force_flush_memtable_now = true;
select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
TYPE NAME VALUE
BINLOG FILE master-bin.000001
......
......@@ -46,7 +46,7 @@ pk
127
SHOW TABLE STATUS LIKE 't1';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 2 15 30 0 0 0 127 NULL NULL NULL latin1_swedish_ci NULL
t1 ROCKSDB # Fixed 2 # # # # # 127 NULL NULL NULL latin1_swedish_ci NULL
INSERT INTO t1 VALUES ();
ERROR 23000: Duplicate entry '127' for key 'PRIMARY'
SELECT * FROM t1;
......
......@@ -106,3 +106,18 @@ SELECT a,b FROM t1;
a b
UNLOCK TABLES;
DROP TABLE t1, t2;
CREATE TABLE t1 (i INT) ENGINE=MyISAM;
HANDLER t1 OPEN h;
CREATE TABLE t2 (i INT) ENGINE=RocksDB;
LOCK TABLES t2 WRITE;
connect con1,localhost,root,,test;
connection con1;
FLUSH TABLES WITH READ LOCK;
connection default;
INSERT INTO t2 VALUES (1);
UNLOCK TABLES;
HANDLER h CLOSE;
connection con1;
disconnect con1;
connection default;
DROP TABLE t1, t2;
......@@ -34,7 +34,10 @@ update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1';
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
/*!50601 SELECT count(*) INTO @is_rocksdb_supported FROM information_schema.SESSION_VARIABLES WHERE variable_name='rocksdb_bulk_load' */;
/*!50601 SELECT count(*) INTO @is_mysql8 FROM information_schema.TABLES WHERE table_schema='performance_schema' AND table_name='session_variables' */;
/*!50601 SET @check_rocksdb = CONCAT( 'SELECT count(*) INTO @is_rocksdb_supported FROM ', IF (@is_mysql8, 'performance', 'information'), '_schema.session_variables WHERE variable_name=\'rocksdb_bulk_load\'') */;
/*!50601 PREPARE s FROM @check_rocksdb */;
/*!50601 EXECUTE s */;
/*!50601 SET @enable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=1', 'SET @dummy = 0') */;
/*!50601 PREPARE s FROM @enable_bulk_load */;
/*!50601 EXECUTE s */;
......
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=RocksDB;
INSERT INTO t1 VALUES (1), (2), (3);
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SELECT * FROM t1;
a
1
2
3
SET DEBUG_SYNC="now SIGNAL finish_scan";
a
1
2
3
DROP TABLE t1;
......@@ -24,11 +24,9 @@ CF_NAME OPTION_TYPE VALUE
__system__ PREFIX_EXTRACTOR rocksdb.CappedPrefix.24
cf1 PREFIX_EXTRACTOR rocksdb.CappedPrefix.24
default PREFIX_EXTRACTOR rocksdb.CappedPrefix.24
SET @@global.rocksdb_update_cf_options = 'cf1={prefix_extractor=capped:26};';
Restarting with new Prefix Extractor...
Changed Prefix Extractor (after restart):
Changed Prefix Extractor (after update_cf_options set, without restart):
SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%';
CF_NAME OPTION_TYPE VALUE
......@@ -65,6 +63,7 @@ COUNT(*)
select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
variable_value-@u
2
SET @@global.rocksdb_update_cf_options = '';
set global rocksdb_compact_cf='cf1';
select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=30 AND id3=30;
......
......@@ -16,7 +16,7 @@ SELECT * FROM t1;
id value
1 1
INSERT INTO t1 values (2, 2);
ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT [ROCKSDB] SNAPSHOT.
ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT|SHARED|EXISTING [ROCKSDB] SNAPSHOT.
ROLLBACK;
SELECT * FROM t1;
id value
......
......@@ -866,6 +866,7 @@ rocksdb_block_restart_interval 16
rocksdb_block_size 4096
rocksdb_block_size_deviation 10
rocksdb_bulk_load OFF
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
rocksdb_bytes_per_sync 0
......@@ -873,6 +874,7 @@ rocksdb_cache_index_and_filter_blocks ON
rocksdb_checksums_pct 100
rocksdb_collect_sst_properties ON
rocksdb_commit_in_the_middle OFF
rocksdb_commit_time_batch_for_recovery OFF
rocksdb_compact_cf
rocksdb_compaction_readahead_size 0
rocksdb_compaction_sequential_deletes 0
......@@ -886,6 +888,7 @@ rocksdb_datadir ./.rocksdb
rocksdb_db_write_buffer_size 0
rocksdb_deadlock_detect OFF
rocksdb_deadlock_detect_depth 50
rocksdb_debug_manual_compaction_delay 0
rocksdb_debug_optimizer_no_zero_cardinality ON
rocksdb_debug_ttl_ignore_pk OFF
rocksdb_debug_ttl_read_filter_ts 0
......@@ -901,6 +904,7 @@ rocksdb_enable_ttl ON
rocksdb_enable_ttl_read_filtering ON
rocksdb_enable_write_thread_adaptive_yield OFF
rocksdb_error_if_exists OFF
rocksdb_error_on_suboptimal_collation ON
rocksdb_flush_log_at_trx_commit 1
rocksdb_force_compute_memtable_stats ON
rocksdb_force_compute_memtable_stats_cachetime 0
......@@ -919,12 +923,14 @@ rocksdb_lock_scanned_rows OFF
rocksdb_lock_wait_timeout 1
rocksdb_log_file_time_to_roll 0
rocksdb_manifest_preallocation_size 4194304
rocksdb_manual_compaction_threads 0
rocksdb_manual_wal_flush ON
rocksdb_master_skip_tx_api OFF
rocksdb_max_background_jobs 2
rocksdb_max_latest_deadlocks 5
rocksdb_max_log_file_size 0
rocksdb_max_manifest_file_size 18446744073709551615
rocksdb_max_manifest_file_size 1073741824
rocksdb_max_manual_compactions 10
rocksdb_max_row_locks 1048576
rocksdb_max_subcompactions 1
rocksdb_max_total_wal_size 0
......@@ -953,6 +959,7 @@ rocksdb_skip_fill_cache OFF
rocksdb_skip_unique_check_tables .*
rocksdb_sst_mgr_rate_bytes_per_sec 0
rocksdb_stats_dump_period_sec 600
rocksdb_stats_recalc_rate 0
rocksdb_store_row_debug_checksums OFF
rocksdb_strict_collation_check OFF
rocksdb_strict_collation_exceptions
......@@ -979,6 +986,7 @@ rocksdb_whole_key_filtering ON
rocksdb_write_batch_max_bytes 0
rocksdb_write_disable_wal OFF
rocksdb_write_ignore_missing_column_families OFF
rocksdb_write_policy write_committed
create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;
insert into t47 values (1, 'row1');
insert into t47 values (2, 'row2');
......@@ -1332,7 +1340,7 @@ insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables
set @tmp1= @@rocksdb_max_row_locks;
set rocksdb_max_row_locks= 20;
update t1 set a=a+10;
ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Failed to acquire lock due to max_num_locks limit
ERROR HY000: Got error 10 'Operation aborted: Failed to acquire lock due to max_num_locks limit' from ROCKSDB
DROP TABLE t1;
#
# Test AUTO_INCREMENT behavior problem,
......@@ -1476,6 +1484,8 @@ rocksdb_block_cache_index_miss #
rocksdb_block_cache_miss #
rocksdb_block_cachecompressed_hit #
rocksdb_block_cachecompressed_miss #
rocksdb_bloom_filter_full_positive #
rocksdb_bloom_filter_full_true_positive #
rocksdb_bloom_filter_prefix_checked #
rocksdb_bloom_filter_prefix_useful #
rocksdb_bloom_filter_useful #
......@@ -1494,6 +1504,8 @@ rocksdb_getupdatessince_calls #
rocksdb_git_date #
rocksdb_git_hash #
rocksdb_iter_bytes_read #
rocksdb_manual_compactions_processed #
rocksdb_manual_compactions_running #
rocksdb_memtable_hit #
rocksdb_memtable_miss #
rocksdb_no_file_closes #
......@@ -1585,6 +1597,8 @@ ROCKSDB_BLOCK_CACHE_INDEX_MISS
ROCKSDB_BLOCK_CACHE_MISS
ROCKSDB_BLOCK_CACHECOMPRESSED_HIT
ROCKSDB_BLOCK_CACHECOMPRESSED_MISS
ROCKSDB_BLOOM_FILTER_FULL_POSITIVE
ROCKSDB_BLOOM_FILTER_FULL_TRUE_POSITIVE
ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED
ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL
ROCKSDB_BLOOM_FILTER_USEFUL
......@@ -1603,6 +1617,8 @@ ROCKSDB_GETUPDATESSINCE_CALLS
ROCKSDB_GIT_DATE
ROCKSDB_GIT_HASH
ROCKSDB_ITER_BYTES_READ
ROCKSDB_MANUAL_COMPACTIONS_PROCESSED
ROCKSDB_MANUAL_COMPACTIONS_RUNNING
ROCKSDB_MEMTABLE_HIT
ROCKSDB_MEMTABLE_MISS
ROCKSDB_NO_FILE_CLOSES
......@@ -1696,6 +1712,8 @@ ROCKSDB_BLOCK_CACHE_INDEX_MISS
ROCKSDB_BLOCK_CACHE_MISS
ROCKSDB_BLOCK_CACHECOMPRESSED_HIT
ROCKSDB_BLOCK_CACHECOMPRESSED_MISS
ROCKSDB_BLOOM_FILTER_FULL_POSITIVE
ROCKSDB_BLOOM_FILTER_FULL_TRUE_POSITIVE
ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED
ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL
ROCKSDB_BLOOM_FILTER_USEFUL
......@@ -1714,6 +1732,8 @@ ROCKSDB_GETUPDATESSINCE_CALLS
ROCKSDB_GIT_DATE
ROCKSDB_GIT_HASH
ROCKSDB_ITER_BYTES_READ
ROCKSDB_MANUAL_COMPACTIONS_PROCESSED
ROCKSDB_MANUAL_COMPACTIONS_RUNNING
ROCKSDB_MEMTABLE_HIT
ROCKSDB_MEMTABLE_MISS
ROCKSDB_NO_FILE_CLOSES
......
......@@ -52,9 +52,9 @@ create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksd
check table t4;
Table Op Msg_type Msg_text
test.t4 check status OK
10000 index entries had around 500 checksums
10000 index entries had around 500 checksums
Around 500 table records had checksums
4000 index entries had around 200 checksums
4000 index entries had around 200 checksums
Around 200 table records had checksums
set session rocksdb_checksums_pct=100;
#
# Ok, table t2 has all rows with checksums. Simulate a few checksum mismatches.
......
......@@ -40,15 +40,27 @@ i
3
insert into t values (4), (1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
# Statement should be rolled back
# Transaction should be rolled back
select * from t;
i
3
rollback;
i
rollback;
i
rollback;
create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
begin;
update t1 force index (value) set value2=value2+1 where value=3;
begin;
update t1 force index (value) set value2=value2+1 where value=2;
update t1 force index (value) set value2=value2+1 where value=4;
update t1 force index (value) set value2=value2+1 where value=4;
update t1 force index (value) set value2=value2+1 where value=3;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
rollback;
rollback;
drop table t1;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t,r1,r2;
......@@ -40,15 +40,27 @@ i
3
insert into t values (4), (1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
# Statement should be rolled back
# Transaction should be rolled back
select * from t;
i
3
rollback;
i
rollback;
i
rollback;
create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
begin;
update t1 force index (value) set value2=value2+1 where value=3;
begin;
update t1 force index (value) set value2=value2+1 where value=2;
update t1 force index (value) set value2=value2+1 where value=4;
update t1 force index (value) set value2=value2+1 where value=4;
update t1 force index (value) set value2=value2+1 where value=3;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
rollback;
rollback;
drop table t1;
set global rocksdb_lock_wait_timeout = @prior_rocksdb_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_rocksdb_deadlock_detect;
drop table t,r1,r2;
create table t1 (a int primary key, b int unique key) engine = rocksdb;
insert into t1 values(1, 1);
begin;
update t1 set b = 2 where b = 1;
insert into t1 values(2, 1);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on index: test.t1.b
rollback;
select * from t1;
a b
1 1
drop table t1;
......@@ -136,6 +136,7 @@ __system__ TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
__system__ TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
__system__ TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
__system__ TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
__system__ TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
__system__ TABLE_FACTORY::INDEX_TYPE #
__system__ TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
__system__ TABLE_FACTORY::CHECKSUM #
......@@ -162,6 +163,7 @@ __system__ TABLE_FACTORY::VERIFY_COMPRESSION #
__system__ TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
__system__ TABLE_FACTORY::FORMAT_VERSION #
__system__ TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
__system__ TABLE_FACTORY::BLOCK_ALIGN #
cf_t1 COMPARATOR #
cf_t1 MERGE_OPERATOR #
cf_t1 COMPACTION_FILTER #
......@@ -207,6 +209,7 @@ cf_t1 TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
cf_t1 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
cf_t1 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
cf_t1 TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
cf_t1 TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
cf_t1 TABLE_FACTORY::INDEX_TYPE #
cf_t1 TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
cf_t1 TABLE_FACTORY::CHECKSUM #
......@@ -233,6 +236,7 @@ cf_t1 TABLE_FACTORY::VERIFY_COMPRESSION #
cf_t1 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
cf_t1 TABLE_FACTORY::FORMAT_VERSION #
cf_t1 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
cf_t1 TABLE_FACTORY::BLOCK_ALIGN #
default COMPARATOR #
default MERGE_OPERATOR #
default COMPACTION_FILTER #
......@@ -278,6 +282,7 @@ default TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
default TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
default TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
default TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
default TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
default TABLE_FACTORY::INDEX_TYPE #
default TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
default TABLE_FACTORY::CHECKSUM #
......@@ -304,6 +309,7 @@ default TABLE_FACTORY::VERIFY_COMPRESSION #
default TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
default TABLE_FACTORY::FORMAT_VERSION #
default TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
default TABLE_FACTORY::BLOCK_ALIGN #
rev:cf_t2 COMPARATOR #
rev:cf_t2 MERGE_OPERATOR #
rev:cf_t2 COMPACTION_FILTER #
......@@ -349,6 +355,7 @@ rev:cf_t2 TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
rev:cf_t2 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
rev:cf_t2 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
rev:cf_t2 TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
rev:cf_t2 TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
rev:cf_t2 TABLE_FACTORY::INDEX_TYPE #
rev:cf_t2 TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
rev:cf_t2 TABLE_FACTORY::CHECKSUM #
......@@ -375,6 +382,7 @@ rev:cf_t2 TABLE_FACTORY::VERIFY_COMPRESSION #
rev:cf_t2 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
rev:cf_t2 TABLE_FACTORY::FORMAT_VERSION #
rev:cf_t2 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
rev:cf_t2 TABLE_FACTORY::BLOCK_ALIGN #
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
......@@ -419,4 +427,49 @@ END OF ROCKSDB TRANSACTION MONITOR OUTPUT
=========================================
ROLLBACK;
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
File Position Gtid_executed Snapshot_ID
0 1
SHOW ENGINE rocksdb STATUS;
Type Name Status
STATISTICS # #
DBSTATS # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
MEMORY_STATS # #
BG_THREADS # #
BG_THREADS # #
EXPLICIT_SNAPSHOTS # #
ROLLBACK;
CREATE EXPLICIT rocksdb SNAPSHOT;
File Position Gtid_executed Snapshot_ID
0 2
SHOW ENGINE rocksdb STATUS;
Type Name Status
STATISTICS # #
DBSTATS # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
MEMORY_STATS # #
BG_THREADS # #
BG_THREADS # #
EXPLICIT_SNAPSHOTS # #
RELEASE EXPLICIT rocksdb SNAPSHOT;
File Position Gtid_executed Snapshot_ID
0 2
SHOW ENGINE rocksdb STATUS;
Type Name Status
STATISTICS # #
DBSTATS # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
MEMORY_STATS # #
BG_THREADS # #
BG_THREADS # #
SET GLOBAL rocksdb_max_background_jobs= @save.rocksdb_max_background_jobs;
......@@ -934,3 +934,44 @@ value
3
rollback;
drop table t1;
#
# #802: MyRocks: Statement rollback doesn't work correctly for nested statements
#
create table t1 (a varchar(100)) engine=rocksdb;
create table t2(a int) engine=rocksdb;
insert into t2 values (1), (2);
create table t3(a varchar(100)) engine=rocksdb;
create function func() returns varchar(100) deterministic
begin
insert into t3 values ('func-called');
set @a= (select a from t2);
return 'func-returned';
end;//
begin;
insert into t1 values (func());
ERROR 21000: Subquery returns more than 1 row
select * from t1;
a
# The following must not produce 'func-called':
select * from t3;
a
rollback;
drop function func;
drop table t1,t2,t3;
#
# MDEV-16710: Slave SQL: Could not execute Update_rows_v1 event with RocksDB and triggers
# Issue#857: MyRocks: Incorrect behavior when multiple statements fail inside a transaction
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=RocksDB;
INSERT INTO t1 VALUES (1);
CREATE TABLE t2 (b INT PRIMARY KEY) ENGINE=RocksDB;
CREATE TRIGGER tr AFTER INSERT ON t2 FOR EACH ROW INSERT INTO non_existing_table VALUES (NULL);
BEGIN;
DELETE FROM t1;
INSERT INTO t2 VALUES (1);
INSERT INTO t2 VALUES (2);
# Must return empty result:
SELECT * FROM t1;
a
COMMIT;
drop table t1,t2;
......@@ -5,7 +5,7 @@ Note #### Storing MySQL user name or password information in the master info rep
[connection master]
DROP TABLE IF EXISTS t1;
include/stop_slave.inc
create table t1 (a int) engine=rocksdb;
create table t1 (a int, b int, primary key (a), unique key (b)) engine=rocksdb;
show variables like 'rpl_skip_tx_api';
Variable_name Value
rpl_skip_tx_api ON
......
......@@ -111,3 +111,11 @@ a b pk
55 NULL 11
10050 NULL 12
DROP TABLE t1;
CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=RocksDB;
INSERT INTO t1 (a,b) VALUES (1,'foo'),(2,'bar');
UPDATE t1 SET a=a+100;
SELECT * FROM t1;
a b
101 foo
102 bar
DROP TABLE t1;
Checking direct reads
CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`pk` int(11) NOT NULL DEFAULT '0',
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL,
PRIMARY KEY (`pk`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
INSERT INTO t1 VALUES (1, 1,'a');
INSERT INTO t1 (a,b) VALUES (2,'b');
set global rocksdb_force_flush_memtable_now=1;
SELECT a,b FROM t1;
a b
1 a
2 b
DROP TABLE t1;
......@@ -2,8 +2,5 @@ DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB;
CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4;
Expect errors that we are missing two .frm files
RocksDB: Schema mismatch - Table test.t1 is registered in RocksDB but does not have a .frm file
RocksDB: Schema mismatch - Table test.t2 is registered in RocksDB but does not have a .frm file
Expect an error that we have an extra .frm file
RocksDB: Schema mismatch - A .frm file exists for table test.t1_dummy, but that table is not registered in RocksDB
DROP TABLE t1, t2;
......@@ -383,6 +383,24 @@ if ($end_max_index_id <= $start_max_index_id) {
SHOW CREATE TABLE t1;
DROP TABLE t1;
# Cardinality checks for indexes statistics
SET @prior_rocksdb_table_stats_sampling_pct = @@rocksdb_table_stats_sampling_pct;
set global rocksdb_table_stats_sampling_pct = 100;
CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 10);
INSERT INTO t1 (a, b) VALUES (2, 10);
INSERT INTO t1 (a, b) VALUES (3, 20);
INSERT INTO t1 (a, b) VALUES (4, 20);
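# The four rows above have 4 distinct values of a and 2 distinct values of b,
# so after the flush and ANALYZE below SHOW INDEX should report a cardinality
# of 4 for the primary key and roughly 2 for kb(b) once it is added inplace
# (assumption drawn from the inserted data, not from recorded test output).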
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;
SHOW INDEX in t1;
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
SHOW INDEX in t1;
DROP TABLE t1;
SET global rocksdb_table_stats_sampling_pct = @prior_rocksdb_table_stats_sampling_pct;
--source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/not_valgrind.inc
--disable_warnings
drop table if exists t1;
......
......@@ -63,7 +63,7 @@ ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
--disable_warnings
# now do same index using copy algorithm
# hitting max row locks (1M)
--error ER_RDB_STATUS_GENERAL
--error ER_GET_ERRMSG
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
set session rocksdb_bulk_load=1;
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
......
......@@ -96,3 +96,31 @@ DELETE FROM t1 WHERE a = 35 AND b = 'foo';
--sorted_result
SELECT * FROM t1;
DROP TABLE t1;
--echo #
--echo # Issue #834/MDEV-15304 ALTER TABLE table_with_hidden_pk causes Can't
--echo # write; duplicate key in table error and/or crash
--echo #
CREATE TABLE t1 (a INT, KEY(a)) ENGINE=RocksDB;
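# t1 has no explicit primary key, so MyRocks uses a hidden PK for it; the
# scenario below rebuilds the table with ALTER ... FORCE while another
# connection keeps a transaction open (assumption based on the issue title
# above, not on the original test comments).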
INSERT INTO t1 VALUES (1),(1+1);
create table t2 (a int);
insert into t2 values (10),(20),(30);
BEGIN;
select * from t2;
connect (con1,localhost,root,,);
connection con1;
alter table t1 force;
connection default;
select * from t1;
connection con1;
insert into t1 values (100);
select * from t1;
disconnect con1;
connection default;
rollback;
drop table t1,t2;
--source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/have_log_bin.inc
--source include/not_valgrind.inc
--echo #
--echo # Testing upgrading from server without merges for auto_increment
......@@ -64,8 +65,8 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
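# The auto_increment value recovered after the restart is not compared against
# an exact number; the test only asserts that it is larger than the current
# max(i) (assumption inferred from the > @row_max comparison above).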
--echo # After engine prepare
begin;
......@@ -80,8 +81,8 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
--echo # After binlog
begin;
......@@ -96,8 +97,8 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
--echo # After everything
begin;
......@@ -112,7 +113,7 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
drop table t;
......@@ -101,3 +101,26 @@ SHOW CREATE TABLE t1;
INSERT INTO t1 VALUES (NULL, 'c');
SELECT * FROM t1;
DROP TABLE t1;
--echo #----------------------------------
--echo # Issue #792 Crash in autoincrement
--echo #----------------------------------
CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY,C2 CHAR) ENGINE=ROCKSDB;
INSERT INTO t1 VALUES(2177,0);
DROP TABLE t1;
CREATE TABLE t0(c0 BLOB) ENGINE=ROCKSDB;
INSERT INTO t0 VALUES(0);
ALTER TABLE t0 AUTO_INCREMENT=0;
DROP TABLE t0;
--echo #----------------------------------
--echo # Issue #869 Crash in autoincrement
--echo #----------------------------------
CREATE TABLE t1 (pk INT AUTO_INCREMENT, a INT, PRIMARY KEY(pk)) ENGINE=RocksDB;
INSERT INTO t1 (a) VALUES (1);
UPDATE t1 SET pk = 3;
ALTER TABLE t1 AUTO_INCREMENT 2;
DROP TABLE t1;
--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;}
--rocksdb_override_cf_options=rev:bf5_1={prefix_extractor=capped:4};
--echo #
--echo # Issue #809: Wrong query result with bloom filters
--echo #
create table t1 (
id1 bigint not null,
id2 bigint not null,
id3 varchar(100) not null,
id4 int not null,
id5 int not null,
value bigint,
value2 varchar(100),
primary key (id1, id2, id3, id4) COMMENT 'rev:bf5_1'
) engine=ROCKSDB;
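# The primary key is stored in the reverse column family 'rev:bf5_1', which
# the override option above configures with prefix_extractor=capped:4; issue
# #809 is about wrong results when bloom filters are used with that setup
# (assumption based on the option line and the issue title, not on the
# original test comments).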
create table t2(a int);
insert into t2 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t3(seq int);
insert into t3
select
1+ A.a + B.a* 10 + C.a * 100 + D.a * 1000
from t2 A, t2 B, t2 C, t2 D;
insert t1
select
(seq+9) div 10, (seq+4) div 5, (seq+4) div 5, seq, seq, 1000, "aaabbbccc"
from t3;
set global rocksdb_force_flush_memtable_now=1;
--echo # Full table scan
explain
select * from t1 limit 10;
select * from t1 limit 10;
--echo # An index scan starting from the end of the table:
explain
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;
select * from t1 order by id1 desc,id2 desc, id3 desc, id4 desc limit 1;
# A testcase for an assertion that the fix removes
# The only requirement for the used column family is that it is reverse-ordered
create table t4 (
pk int unsigned not null primary key,
kp1 int unsigned not null,
kp2 int unsigned not null,
col1 int unsigned,
key(kp1, kp2) comment 'rev:bf5_2'
) engine=rocksdb;
insert into t4 values (1, 0xFFFF, 0xFFF, 12345);
--echo # This must not fail an assert:
select * from t4 force index(kp1) where kp1=0xFFFFFFFF and kp2<=0xFFFFFFFF order by kp2 desc;
drop table t1,t2,t3,t4;
--rocksdb_default_cf_options=write_buffer_size=16k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:12
--rocksdb_override_cf_options=cf2={optimize_filters_for_hits=true}
--source include/have_rocksdb.inc
create table r1 (id bigint primary key, value bigint) engine=rocksdb;
create table r2 (id bigint, value bigint, primary key (id) comment 'cf2') engine=rocksdb;
set session rocksdb_bulk_load=1;
--disable_query_log
let $t = 1;
let $i = 1;
while ($t <= 2) {
while ($i <= 1000) {
let $insert = INSERT INTO r$t VALUES($i, $i);
# skipping a row
if ($i != 100) {
eval $insert;
}
inc $i;
}
inc $t;
}
--enable_query_log
set session rocksdb_bulk_load=0;
# bloom filter should be useful on insert (calling GetForUpdate)
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r1 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
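# r1 uses the default column family, for which the server options above enable
# a bloom filter, so the filter-hit delta printed here is expected to be
# non-zero (assumption based on the default_cf_options line, not on recorded
# output).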
# cf2 has no bloom filter in the bottommost level (optimize_filters_for_hits=true is set for cf2 above)
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r2 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
DROP TABLE r1, r2;
--source include/have_rocksdb.inc
SET rocksdb_bulk_load_size=15;
CREATE TABLE t4 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t3 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t2 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t1 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
### Setup the control table ###
--disable_query_log
let $sign = 1;
let $max = 10;
let $i = 1;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t3 VALUES ($a, $b, $b);
eval $insert;
inc $i;
}
--enable_query_log
### Bulk load PK only ###
SET rocksdb_bulk_load=1;
INSERT INTO t1 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t1 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t1 FORCE INDEX (b);
SELECT count(*) FROM t1 FORCE INDEX (c);
SET rocksdb_bulk_load=0;
SELECT * FROM t1 FORCE INDEX (PRIMARY);
SELECT b FROM t1 FORCE INDEX (b);
SELECT c FROM t1 FORCE INDEX (c);
--echo Checksums should match
CHECKSUM TABLE t3;
CHECKSUM TABLE t1;
### Bulk load PK and SK but require PK order ###
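# With rocksdb_bulk_load_allow_sk=1 the secondary keys are bulk loaded too,
# but this section still feeds the rows in primary-key order (assumption
# restating the section header above).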
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t4 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t4 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t4 FORCE INDEX (b);
SELECT count(*) FROM t4 FORCE INDEX (c);
SET rocksdb_bulk_load=0;
SELECT * FROM t4 FORCE INDEX (PRIMARY);
SELECT b FROM t4 FORCE INDEX (b);
SELECT c FROM t4 FORCE INDEX (c);
--echo Checksums should match
CHECKSUM TABLE t3;
CHECKSUM TABLE t4;
### Bulk load both PK and SK in random order for all ###
SET rocksdb_bulk_load_allow_unsorted=1;
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t2 SELECT * FROM t3 WHERE b >= 0 ORDER BY b;
INSERT INTO t2 SELECT * FROM t3 WHERE b < 0 ORDER BY b;
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t2 FORCE INDEX (b);
SELECT count(*) FROM t2 FORCE INDEX (c);
--disable_query_log
let $sign = 1;
let $max = 20;
let $i = 11;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t2 VALUES ($a, $b, $b);
eval $insert;
inc $i;
}
--enable_query_log
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t2 FORCE INDEX (b);
SELECT count(*) FROM t2 FORCE INDEX (c);
SET rocksdb_bulk_load=0;
--disable_query_log
let $sign = 1;
let $max = 20;
let $i = 11;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t3 VALUES ($a, $b, $b);
eval $insert;
inc $i;
}
--enable_query_log
SELECT * FROM t2 FORCE INDEX (PRIMARY);
SELECT b FROM t2 FORCE INDEX (b);
SELECT c FROM t2 FORCE INDEX (c);
--echo Checksums should match
CHECKSUM TABLE t3;
CHECKSUM TABLE t2;
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
DROP TABLE t4;
......@@ -79,5 +79,24 @@ SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema
show index in t1;
SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
drop table t1;
CREATE TABLE t2 (a INT, b INT, c INT, d INT, e INT, f INT, g INT,
PRIMARY KEY (a), KEY (c, b, a, d, e, f, g))
ENGINE=ROCKSDB;
--disable_query_log
let $i=0;
while ($i<100)
{
inc $i;
eval insert t2 values($i, $i div 10, 1, 1, 1, 1, 1);
}
--enable_query_log
# Cardinality of index c should be 1 for column c, 10 for b, and 100 for a and the remaining columns.
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
ANALYZE TABLE t2;
--echo cardinality of the columns after 'a' must be equal to the cardinality of column 'a'
SELECT CARDINALITY INTO @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND COLUMN_NAME='a';
SELECT COLUMN_NAME, CARDINALITY = @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND SEQ_IN_INDEX > 3;
drop table t1, t2;
......@@ -4,7 +4,8 @@ let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;
let $error_log= $MYSQLTEST_VARDIR/log/my_restart.err;
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";
--exec find $MYSQLD_DATADIR/.rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "echo hello=world>>{}"
--exec find $MYSQLD_DATADIR/.rocksdb/OPTIONS* | sort -t- -k 2 -n | tail -1 | xargs -0 -I {} -t sh -c "sed -i 's/rocksdb_version=.*/rocksdb_version=99.9.9/' {}"
--exec find $MYSQLD_DATADIR/.rocksdb/OPTIONS* | sort -t- -k 2 -n | tail -1 | xargs -0 -I {} -t sh -c "echo hello=world>>{}"
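# Rewriting rocksdb_version to 99.9.9 and appending an unknown 'hello=world'
# option to the newest OPTIONS file should make the startup compatibility
# check log the message searched for below (assumption based on the
# SEARCH_PATTERN that follows).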
--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--shutdown_server 10
......@@ -17,5 +18,4 @@ let SEARCH_PATTERN= RocksDB: Compatibility check against existing database optio
--enable_reconnect
--exec echo "restart" > $restart_file
--source include/wait_until_connected_again.inc
--exec find $MYSQLD_DATADIR/.rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "sed -i '/hello=world/d' {}"
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";
--source include/have_rocksdb.inc
--source include/have_fullregex.inc
SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
call mtr.add_suppression("Invalid pattern");
# ci non-indexed column is allowed
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
# ci indexed column is not allowed
--error ER_UNSUPPORTED_COLLATION
ALTER TABLE t1 ADD INDEX (value);
DROP TABLE t1;
# ci indexed column is not allowed
......@@ -28,6 +27,8 @@ DROP TABLE t1;
# cs latin1_bin is allowed
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
# THIS SHOULD FAIL BUT IT DOES NOT
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
# cs utf8_bin is allowed
......@@ -153,9 +154,8 @@ DROP TABLE t2;
# test invalid regex (missing end bracket)
--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
SET GLOBAL rocksdb_strict_collation_exceptions="[a-b";
--exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
--exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 | tail -n 1
--error ER_UNSUPPORTED_COLLATION
CREATE TABLE a (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
SET GLOBAL rocksdb_strict_collation_exceptions="[a-b]";
......@@ -166,9 +166,8 @@ CREATE TABLE c (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rock
DROP TABLE a, b;
# test invalid regex (trailing escape)
--exec echo "" >$MYSQLTEST_VARDIR/log/mysqld.1.err
SET GLOBAL rocksdb_strict_collation_exceptions="abc\\";
--exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2
--exec grep "Invalid pattern" $MYSQLTEST_VARDIR/log/mysqld.1.err | cut -d] -f2 | tail -n 1
--error ER_UNSUPPORTED_COLLATION
CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
SET GLOBAL rocksdb_strict_collation_exceptions="abc";
......@@ -180,5 +179,28 @@ DROP TABLE abc;
# test bad regex (null caused a crash) - Issue 493
SET GLOBAL rocksdb_strict_collation_exceptions=null;
# test for warnings instead of errors
--let $_mysqld_option=--rocksdb_error_on_suboptimal_collation=0
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_option.inc
SET GLOBAL rocksdb_strict_collation_check=1;
# ci indexed column is not optimal, should emit a warning
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
DROP TABLE t1;
# ci non-indexed column is allowed
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
# ci indexed column is not allowed, should emit a warning
ALTER TABLE t1 ADD INDEX (value);
DROP TABLE t1;
# cs latin1_bin is allowed
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
# THIS SHOULD WARN BUT IT DOES NOT
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
# cleanup
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
--source include/restart_mysqld.inc
!include suite/rpl/my.cnf
[mysqld.1]
binlog_format=row
USE mysql;
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE test.mysql_table (a INT) ENGINE=ROCKSDB;
USE test;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE IF NOT EXISTS mysql_table_2 (a INT) ENGINE=ROCKSDB;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE mysql_table_no_cols ENGINE=ROCKSDB;
CREATE TABLE mysql.mysql_table_2 (a INT) ENGINE=ROCKSDB;
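# Tables created directly in the mysql system schema are not subject to the
# no-primary-key block, unlike the attempts in the test schema above
# (assumption inferred from which statements expect ER_BLOCK_NO_PRIMARY_KEY).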
CREATE TABLE mysql_primkey (a INT PRIMARY KEY, b INT, c INT, d INT, INDEX (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey DROP b, DROP a, ADD (f INT PRIMARY KEY);
-- error ER_BLOCK_NO_PRIMARY_KEY
ALTER TABLE mysql_primkey DROP PRIMARY KEY;
CREATE TABLE mysql_primkey2 (a INT PRIMARY KEY, b INT, c INT) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey2 DROP b;
ALTER TABLE mysql_primkey2 ADD (b INT);
-- error ER_BLOCK_NO_PRIMARY_KEY
ALTER TABLE mysql_primkey2 DROP c, DROP A;
CREATE TABLE mysql_primkey3 (a INT PRIMARY KEY, b INT, c INT, INDEX indexonb (b), INDEX indexonc (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey3 DROP INDEX indexonb;
ALTER TABLE mysql_primkey3 DROP c;
ALTER TABLE mysql_primkey3 DROP PRIMARY KEY, ADD PRIMARY KEY(b);
CREATE TABLE mysql_primkey4(a INT, b INT, PRIMARY KEY(a), INDEX si (a, b)) ENGINE=ROCKSDB;
DROP INDEX si ON mysql_primkey4;
-- error ER_BLOCK_NO_PRIMARY_KEY
DROP INDEX `PRIMARY` ON mysql_primkey4;
ALTER TABLE mysql.mysql_table ADD PRIMARY KEY (a);
ALTER TABLE mysql.mysql_table DROP PRIMARY KEY;
DROP TABLE mysql_primkey;
DROP TABLE mysql_primkey2;
DROP TABLE mysql_primkey3;
DROP TABLE mysql_primkey4;
USE mysql;
DROP TABLE mysql_table;
DROP TABLE mysql_table_2;
--log-bin --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log-slave-updates
......@@ -46,6 +46,9 @@ WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY");
# The number of sst files should be 1 or more
select count(*) > 0 from information_schema.rocksdb_sst_props;
# cleanup
DROP TABLE t1;
DROP TABLE t2;
--rocksdb_strict_collation_check=off --binlog_format=row --log-bin
--rocksdb_strict_collation_check=off --binlog_format=row --log-bin --rocksdb_records_in_range=2
......@@ -17,6 +17,7 @@ select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where
CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
set global rocksdb_force_flush_memtable_now = true;
--let $MASTER_UUID = query_get_value(SELECT @@SERVER_UUID, @@SERVER_UUID, 1)
--let $max_index_id = query_get_value(SELECT * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO where type = 'MAX_INDEX_ID', VALUE, 1)
--replace_result $MASTER_UUID uuid $max_index_id max_index_id
......