nexedi / MariaDB

Commit d71df7e1 authored Mar 05, 2017 by Vicențiu Ciorbaru
5.6.35-80.0
parent d4f0686c
Showing 20 changed files with 756 additions and 83 deletions (+756 -83)
storage/tokudb/CMakeLists.txt  +1 -1
storage/tokudb/PerconaFT/ft/ft-ops.cc  +13 -10
storage/tokudb/PerconaFT/ft/ft-ops.h  +5 -0
storage/tokudb/PerconaFT/ft/logger/recover.cc  +2 -1
storage/tokudb/PerconaFT/ft/node.cc  +10 -8
storage/tokudb/PerconaFT/ft/node.h  +33 -21
storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc  +1 -2
storage/tokudb/PerconaFT/ft/txn/roll.cc  +2 -1
storage/tokudb/PerconaFT/util/dmt.h  +0 -5
storage/tokudb/PerconaFT/util/omt.h  +0 -2
storage/tokudb/ha_tokudb.cc  +46 -28
storage/tokudb/ha_tokudb.h  +2 -0
storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result  +48 -0
storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result  +469 -0
storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result  +43 -0
storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test  +67 -0
storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test  +5 -0
storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test  +4 -0
storage/tokudb/mysql-test/tokudb_backup/t/suite.opt  +1 -1
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_extra_col_slave_tokudb.result  +4 -3
storage/tokudb/CMakeLists.txt
-SET(TOKUDB_VERSION 5.6.34-79.1)
+SET(TOKUDB_VERSION 5.6.35-80.0)
 # PerconaFT only supports x86-64 and cmake-2.8.9+
 IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
    NOT CMAKE_VERSION VERSION_LESS "2.8.9")
storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -651,10 +651,8 @@ void toku_ftnode_clone_callback(void *value_data,
     // set new pair attr if necessary
     if (node->height == 0) {
         *new_attr = make_ftnode_pair_attr(node);
-        for (int i = 0; i < node->n_children; i++) {
-            BLB(node, i)->logical_rows_delta = 0;
-            BLB(cloned_node, i)->logical_rows_delta = 0;
-        }
+        node->logical_rows_delta = 0;
+        cloned_node->logical_rows_delta = 0;
     } else {
         new_attr->is_valid = false;
     }
@@ -702,6 +700,10 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
         if (ftnode->height == 0) {
             FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
             FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
+            if (!ftnode->dirty) {
+                toku_ft_adjust_logical_row_count(
+                    ft, -ftnode->logical_rows_delta);
+            }
         } else {
             FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
             FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF_BYTES, node_size);
@@ -714,11 +716,12 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
                 BASEMENTNODE bn = BLB(ftnode, i);
                 toku_ft_decrease_stats(&ft->in_memory_stats,
                                        bn->stat64_delta);
-                if (!ftnode->dirty)
-                    toku_ft_adjust_logical_row_count(
-                        ft, -bn->logical_rows_delta);
             }
         }
+        if (!ftnode->dirty) {
+            toku_ft_adjust_logical_row_count(
+                ft, -ftnode->logical_rows_delta);
+        }
     }
 }
 toku_ftnode_free(&ftnode);
@@ -944,8 +947,6 @@ int toku_ftnode_pe_callback(void *ftnode_pv,
                 basements_to_destroy[num_basements_to_destroy++] = bn;
                 toku_ft_decrease_stats(&ft->in_memory_stats,
                                        bn->stat64_delta);
-                toku_ft_adjust_logical_row_count(
-                    ft, -bn->logical_rows_delta);
                 set_BNULL(node, i);
                 BP_STATE(node, i) = PT_ON_DISK;
                 num_partial_evictions++;
@@ -2652,7 +2653,7 @@ static std::unique_ptr<char[], decltype(&toku_free)> toku_file_get_parent_dir(
     return result;
 }
 
-static bool toku_create_subdirs_if_needed(const char *path) {
+bool toku_create_subdirs_if_needed(const char *path) {
     static const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP |
                                    S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
@@ -4563,6 +4564,8 @@ int toku_ft_rename_iname(DB_TXN *txn,
             bs_new_name);
     }
 
+    if (!toku_create_subdirs_if_needed(new_iname_full.get()))
+        return get_error_errno();
     r = toku_os_rename(old_iname_full.get(), new_iname_full.get());
     if (r != 0)
         return r;
storage/tokudb/PerconaFT/ft/ft-ops.h
@@ -288,3 +288,8 @@ void toku_ft_set_direct_io(bool direct_io_on);
 void toku_ft_set_compress_buffers_before_eviction(bool compress_buffers);
 
 void toku_note_deserialized_basement_node(bool fixed_key_size);
+
+// Creates all directories for the path if necessary,
+// returns true if all dirs are created successfully or
+// all dirs exist, false otherwise.
+bool toku_create_subdirs_if_needed(const char *path);
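The contract in that comment is easiest to see with a small stand-alone sketch. The helper name and the plain POSIX mkdir loop below are illustrative assumptions, not the PerconaFT implementation (which lives in ft-ops.cc and goes through the toku_* portability layer); the sketch only shows the "create every missing parent directory, succeed if they all exist afterwards" behaviour the declaration promises.

// Illustrative sketch only: create every directory component of 'path'
// (the last component is treated as a file name and skipped). Returns true
// when all directories exist afterwards, false on the first failure.
#include <cerrno>
#include <string>
#include <sys/stat.h>
#include <sys/types.h>

static bool create_subdirs_if_needed_sketch(const char *path) {
    const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP |
                            S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
    std::string p(path);
    for (size_t pos = p.find('/', 1); pos != std::string::npos;
         pos = p.find('/', pos + 1)) {
        std::string prefix = p.substr(0, pos);
        if (mkdir(prefix.c_str(), dir_mode) != 0 && errno != EEXIST)
            return false;  // could not create it and it does not already exist
    }
    return true;
}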
storage/tokudb/PerconaFT/ft/logger/recover.cc
@@ -987,7 +987,8 @@ static int toku_recover_frename(struct logtype_frename *l, RECOVER_ENV renv) {
         return 1;
 
     if (old_exist && !new_exist &&
-        (toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
+        (!toku_create_subdirs_if_needed(new_iname_full.get()) ||
+         toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
          toku_fsync_directory(old_iname_full.get()) == -1 ||
          toku_fsync_directory(new_iname_full.get()) == -1))
         return 1;
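The condition above chains four steps and treats a failure of any of them as a recovery error: create the destination's directory tree if needed, rename the file, then fsync both the old and the new parent directories so the rename itself is durable. A rough stand-alone illustration of that pattern (hypothetical wrapper using plain POSIX calls instead of the toku_os_rename/toku_fsync_directory helpers; directory creation is the sketch shown after ft-ops.h above):

// Illustrative only: returns 0 on success, -1 on the first failing step.
#include <cstdio>            // std::rename
#include <fcntl.h>           // open
#include <unistd.h>          // fsync, close
#include <initializer_list>

static int rename_and_fsync_dirs(const char *old_path, const char *new_path,
                                 const char *old_dir, const char *new_dir) {
    if (std::rename(old_path, new_path) != 0)
        return -1;
    for (const char *dir : {old_dir, new_dir}) {
        int fd = open(dir, O_RDONLY);
        if (fd < 0)
            return -1;
        int r = fsync(fd);  // persist the directory entry change
        close(fd);
        if (r != 0)
            return -1;
    }
    return 0;
}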
storage/tokudb/PerconaFT/ft/node.cc
@@ -386,7 +386,8 @@ static void bnc_apply_messages_to_basement_node(
     const pivot_bounds &
         bounds,  // contains pivot key bounds of this basement node
     txn_gc_info *gc_info,
-    bool *msgs_applied) {
+    bool *msgs_applied,
+    int64_t *logical_rows_delta) {
     int r;
     NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
@@ -394,7 +395,6 @@ static void bnc_apply_messages_to_basement_node(
     // apply messages from this buffer
     STAT64INFO_S stats_delta = {0, 0};
     uint64_t workdone_this_ancestor = 0;
-    int64_t logical_rows_delta = 0;
     uint32_t stale_lbi, stale_ube;
 
     if (!bn->stale_ancestor_messages_applied) {
@@ -470,7 +470,7 @@ static void bnc_apply_messages_to_basement_node(
                 gc_info,
                 &workdone_this_ancestor,
                 &stats_delta,
-                &logical_rows_delta);
+                logical_rows_delta);
         }
     } else if (stale_lbi == stale_ube) {
         // No stale messages to apply, we just apply fresh messages, and mark
@@ -482,7 +482,7 @@ static void bnc_apply_messages_to_basement_node(
             .gc_info = gc_info,
             .workdone = &workdone_this_ancestor,
             .stats_to_update = &stats_delta,
-            .logical_rows_delta = &logical_rows_delta};
+            .logical_rows_delta = logical_rows_delta};
         if (fresh_ube - fresh_lbi > 0)
             *msgs_applied = true;
         r = bnc->fresh_message_tree
@@ -503,7 +503,7 @@ static void bnc_apply_messages_to_basement_node(
             .gc_info = gc_info,
             .workdone = &workdone_this_ancestor,
             .stats_to_update = &stats_delta,
-            .logical_rows_delta = &logical_rows_delta};
+            .logical_rows_delta = logical_rows_delta};
         r = bnc->stale_message_tree
                 .iterate_on_range<struct iterate_do_bn_apply_msg_extra,
@@ -521,8 +521,6 @@ static void bnc_apply_messages_to_basement_node(
     if (stats_delta.numbytes || stats_delta.numrows) {
         toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
     }
-    toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
-    bn->logical_rows_delta += logical_rows_delta;
 }
 
 static void
@@ -536,6 +534,7 @@ apply_ancestors_messages_to_bn(
     bool *msgs_applied) {
+    int64_t logical_rows_delta = 0;
     BASEMENTNODE curr_bn = BLB(node, childnum);
     const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
     for (ANCESTORS curr_ancestors = ancestors; curr_ancestors;
          curr_ancestors = curr_ancestors->next) {
@@ -548,13 +547,16 @@ apply_ancestors_messages_to_bn(
                 curr_ancestors->childnum,
                 curr_bounds,
                 gc_info,
-                msgs_applied);
+                msgs_applied,
+                &logical_rows_delta);
             // We don't want to check this ancestor node again if the
             // next time we query it, the msn hasn't changed.
             curr_bn->max_msn_applied =
                 curr_ancestors->node->max_msn_applied_to_node_on_disk;
         }
     }
+    toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
+    node->logical_rows_delta += logical_rows_delta;
     // At this point, we know all the stale messages above this
     // basement node have been applied, and any new messages will be
     // fresh, so we don't need to look at stale messages for this
storage/tokudb/PerconaFT/ft/node.h
@@ -157,36 +157,49 @@ class ftnode_pivot_keys {
 // TODO: class me up
 struct ftnode {
-    MSN max_msn_applied_to_node_on_disk; // max_msn_applied that will be written to disk
+    // max_msn_applied that will be written to disk
+    MSN max_msn_applied_to_node_on_disk;
     unsigned int flags;
-    BLOCKNUM blocknum; // Which block number is this node?
-    int layout_version; // What version of the data structure?
-    int layout_version_original; // different (<) from layout_version if upgraded from a previous version (useful for debugging)
-    int layout_version_read_from_disk; // transient, not serialized to disk, (useful for debugging)
-    uint32_t build_id; // build_id (svn rev number) of software that wrote this node to disk
-    int height; /* height is always >= 0. 0 for leaf, >0 for nonleaf. */
-    int dirty;
+    // Which block number is this node?
+    BLOCKNUM blocknum;
+    // What version of the data structure?
+    int layout_version;
+    // different (<) from layout_version if upgraded from a previous version
+    // (useful for debugging)
+    int layout_version_original;
+    // transient, not serialized to disk, (useful for debugging)
+    int layout_version_read_from_disk;
+    // build_id (svn rev number) of software that wrote this node to disk
+    uint32_t build_id;
+    // height is always >= 0. 0 for leaf, >0 for nonleaf.
+    int height;
+    int dirty;
     uint32_t fullhash;
-    // for internal nodes, if n_children==fanout+1 then the tree needs to be rebalanced.
-    // for leaf nodes, represents number of basement nodes
+    // current count of rows add or removed as a result of message application
+    // to this node as a basement, irrelevant for internal nodes, gets reset
+    // when node is undirtied. Used to back out tree scoped LRC id node is
+    // evicted but not persisted
+    int64_t logical_rows_delta;
+    // for internal nodes, if n_children==fanout+1 then the tree needs to be
+    // rebalanced. for leaf nodes, represents number of basement nodes
     int n_children;
     ftnode_pivot_keys pivotkeys;
-    // What's the oldest referenced xid that this node knows about? The real oldest
-    // referenced xid might be younger, but this is our best estimate. We use it
-    // as a heuristic to transition provisional mvcc entries from provisional to
-    // committed (from implicity committed to really committed).
+    // What's the oldest referenced xid that this node knows about? The real
+    // oldest referenced xid might be younger, but this is our best estimate.
+    // We use it as a heuristic to transition provisional mvcc entries from
+    // provisional to committed (from implicity committed to really committed).
     //
-    // A better heuristic would be the oldest live txnid, but we use this since it
-    // still works well most of the time, and its readily available on the inject
-    // code path.
+    // A better heuristic would be the oldest live txnid, but we use this since
+    // it still works well most of the time, and its readily available on the
+    // inject code path.
     TXNID oldest_referenced_xid_known;
     // array of size n_children, consisting of ftnode partitions
-    // each one is associated with a child
-    // for internal nodes, the ith partition corresponds to the ith message buffer
-    // for leaf nodes, the ith partition corresponds to the ith basement node
+    // each one is associated with a child for internal nodes, the ith
+    // partition corresponds to the ith message buffer for leaf nodes, the ith
+    // partition corresponds to the ith basement node
     struct ftnode_partition *bp;
     struct ctpair *ct_pair;
 };
@@ -199,7 +212,6 @@ struct ftnode_leaf_basement_node {
     MSN max_msn_applied;  // max message sequence number applied
     bool stale_ancestor_messages_applied;
     STAT64INFO_S stat64_delta;  // change in stat64 counters since basement
                                 // was last written to disk
-    int64_t logical_rows_delta;
 };
 typedef struct ftnode_leaf_basement_node *BASEMENTNODE;
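The logical_rows_delta comments above describe the bookkeeping that most of the ft/ changes in this commit implement: the tree keeps one logical row count, each node remembers how much of that count it contributed while in memory, and evicting a clean node subtracts that contribution again so the rows are not double counted when its messages are re-applied later. A toy model of the two paths visible in the diff (hypothetical names, not the PerconaFT types):

#include <cstdint>

// Toy stand-ins for struct ftnode and the tree-wide FT; illustrative only.
struct toy_node {
    bool dirty = false;
    int64_t logical_rows_delta = 0;  // rows added/removed by messages applied in memory
};

struct toy_tree {
    int64_t logical_rows = 0;  // tree-scoped logical row count
};

// Mirrors apply_ancestors_messages_to_bn(): the delta produced by applying
// ancestor messages is added to the tree count and remembered on the node.
void apply_messages(toy_tree &t, toy_node &n, int64_t delta) {
    t.logical_rows += delta;
    n.logical_rows_delta += delta;
}

// Mirrors the new branch in toku_ftnode_flush_callback(): when a clean node
// is evicted, its remembered delta is subtracted from the tree count again.
void evict(toy_tree &t, const toy_node &n) {
    if (!n.dirty)
        t.logical_rows -= n.logical_rows_delta;
}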
storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
@@ -996,7 +996,6 @@ BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn) {
     bn->seqinsert = orig_bn->seqinsert;
     bn->stale_ancestor_messages_applied = orig_bn->stale_ancestor_messages_applied;
     bn->stat64_delta = orig_bn->stat64_delta;
-    bn->logical_rows_delta = orig_bn->logical_rows_delta;
     bn->data_buffer.clone(&orig_bn->data_buffer);
     return bn;
 }
@@ -1007,7 +1006,6 @@ BASEMENTNODE toku_create_empty_bn_no_buffer(void) {
     bn->seqinsert = 0;
     bn->stale_ancestor_messages_applied = false;
     bn->stat64_delta = ZEROSTATS;
-    bn->logical_rows_delta = 0;
     bn->data_buffer.init_zero();
     return bn;
 }
@@ -1432,6 +1430,7 @@ static FTNODE alloc_ftnode_for_deserialize(uint32_t fullhash, BLOCKNUM blocknum)
     node->fullhash = fullhash;
     node->blocknum = blocknum;
     node->dirty = 0;
+    node->logical_rows_delta = 0;
     node->bp = nullptr;
     node->oldest_referenced_xid_known = TXNID_NONE;
     return node;
storage/tokudb/PerconaFT/ft/txn/roll.cc
@@ -227,7 +227,8 @@ int toku_rollback_frename(BYTESTRING old_iname,
         return 1;
 
     if (!old_exist && new_exist &&
-        (toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
+        (!toku_create_subdirs_if_needed(old_iname_full.get()) ||
+         toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
          toku_fsync_directory(new_iname_full.get()) == -1 ||
          toku_fsync_directory(old_iname_full.get()) == -1))
         return 1;
storage/tokudb/PerconaFT/util/dmt.h
@@ -589,7 +589,6 @@ class dmt {
     void convert_from_tree_to_array(void);
 
-    __attribute__((nonnull(2, 5)))
     void delete_internal(subtree *const subtreep, const uint32_t idx,
                          subtree *const subtree_replace,
                          subtree **const rebalance_subtree);
 
     template <typename iterate_extra_t,
@@ -627,16 +626,12 @@ class dmt {
     __attribute__((nonnull))
     void rebalance(subtree *const subtree);
 
-    __attribute__((nonnull))
     static void copyout(uint32_t *const outlen, dmtdata_t *const out,
                         const dmt_node *const n);
-    __attribute__((nonnull))
     static void copyout(uint32_t *const outlen, dmtdata_t **const out,
                         dmt_node *const n);
-    __attribute__((nonnull))
     static void copyout(uint32_t *const outlen, dmtdata_t *const out,
                         const uint32_t len,
                         const dmtdata_t *const stored_value_ptr);
-    __attribute__((nonnull))
     static void copyout(uint32_t *const outlen, dmtdata_t **const out,
                         const uint32_t len,
                         dmtdata_t *const stored_value_ptr);
 
     template <typename dmtcmp_t,
storage/tokudb/PerconaFT/util/omt.h
@@ -284,7 +284,6 @@ class omt {
      * By taking ownership of the array, we save a malloc and memcpy,
      * and possibly a free (if the caller is done with the array).
      */
-    __attribute__((nonnull))
     void create_steal_sorted_array(omtdata_t **const values,
                                    const uint32_t numvalues,
                                    const uint32_t new_capacity);
 
     /**
@@ -667,7 +666,6 @@ class omt {
     void set_at_internal(const subtree &subtree, const omtdata_t &value,
                          const uint32_t idx);
 
-    __attribute__((nonnull(2, 5)))
     void delete_internal(subtree *const subtreep, const uint32_t idx,
                          omt_node *const copyn,
                          subtree **const rebalance_subtree);
 
     template <typename iterate_extra_t,
storage/tokudb/ha_tokudb.cc
@@ -29,6 +29,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "tokudb_status.h"
#include "tokudb_card.h"
#include "ha_tokudb.h"
#include "sql_db.h"
HASH
TOKUDB_SHARE
::
_open_tables
;
...
...
@@ -6122,8 +6123,6 @@ int ha_tokudb::info(uint flag) {
     stats.deleted = 0;
     if (!(flag & HA_STATUS_NO_LOCK)) {
         uint64_t num_rows = 0;
-        TOKU_DB_FRAGMENTATION_S frag_info;
-        memset(&frag_info, 0, sizeof frag_info);
         error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
         if (error) {
@@ -6140,11 +6139,6 @@ int ha_tokudb::info(uint flag) {
         } else {
             goto cleanup;
         }
 
-        error = share->file->get_fragmentation(share->file, &frag_info);
-        if (error) {
-            goto cleanup;
-        }
-        stats.delete_length = frag_info.unused_bytes;
         DB_BTREE_STAT64 dict_stats;
         error = share->file->stat64(share->file, txn, &dict_stats);
@@ -6156,6 +6150,7 @@ int ha_tokudb::info(uint flag) {
         stats.update_time = dict_stats.bt_modify_time_sec;
         stats.check_time = dict_stats.bt_verify_time_sec;
         stats.data_file_length = dict_stats.bt_dsize;
+        stats.delete_length = dict_stats.bt_fsize - dict_stats.bt_dsize;
         if (hidden_primary_key) {
             //
             // in this case, we have a hidden primary key, do not
@@ -6191,30 +6186,21 @@ int ha_tokudb::info(uint flag) {
         //
         // this solution is much simpler than trying to maintain an
         // accurate number of valid keys at the handlerton layer.
-        uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
+        uint curr_num_DBs =
+            table->s->keys + tokudb_test(hidden_primary_key);
         for (uint i = 0; i < curr_num_DBs; i++) {
             // skip the primary key, skip dropped indexes
             if (i == primary_key || share->key_file[i] == NULL) {
                 continue;
             }
-            error = share->key_file[i]->stat64(share->key_file[i], txn, &dict_stats);
+            error = share->key_file[i]->stat64(
+                share->key_file[i], txn, &dict_stats);
             if (error) {
                 goto cleanup;
             }
             stats.index_file_length += dict_stats.bt_dsize;
-            error = share->file->get_fragmentation(share->file, &frag_info);
-            if (error) {
-                goto cleanup;
-            }
-            stats.delete_length += frag_info.unused_bytes;
+            stats.delete_length +=
+                dict_stats.bt_fsize - dict_stats.bt_dsize;
         }
     }
@@ -7651,6 +7637,27 @@ int ha_tokudb::delete_table(const char *name) {
     TOKUDB_HANDLER_DBUG_RETURN(error);
 }
 
+static bool tokudb_check_db_dir_exist_from_table_name(const char *table_name) {
+    DBUG_ASSERT(table_name);
+    bool mysql_dir_exists;
+    char db_name[FN_REFLEN];
+    const char *db_name_begin = strchr(table_name, FN_LIBCHAR);
+    const char *db_name_end = strrchr(table_name, FN_LIBCHAR);
+    DBUG_ASSERT(db_name_begin);
+    DBUG_ASSERT(db_name_end);
+    DBUG_ASSERT(db_name_begin != db_name_end);
+
+    ++db_name_begin;
+    size_t db_name_size = db_name_end - db_name_begin;
+
+    DBUG_ASSERT(db_name_size < FN_REFLEN);
+
+    memcpy(db_name, db_name_begin, db_name_size);
+    db_name[db_name_size] = '\0';
+
+    mysql_dir_exists = (check_db_dir_existence(db_name) == 0);
+
+    return mysql_dir_exists;
+}
+
 //
 // renames table from "from" to "to"
@@ -7673,15 +7680,26 @@ int ha_tokudb::rename_table(const char *from, const char *to) {
         TOKUDB_SHARE::drop_share(share);
     }
     int error;
-    error = delete_or_rename_table(from, to, false);
-    if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
-        error == DB_LOCK_NOTGRANTED) {
+    bool to_db_dir_exist = tokudb_check_db_dir_exist_from_table_name(to);
+    if (!to_db_dir_exist) {
         sql_print_error(
-            "Could not rename table from %s to %s because another transaction "
-            "has accessed the table. To rename the table, make sure no "
-            "transactions touch the table.",
+            "Could not rename table from %s to %s because "
+            "destination db does not exist",
             from,
             to);
+        error = HA_ERR_DEST_SCHEMA_NOT_EXIST;
+    } else {
+        error = delete_or_rename_table(from, to, false);
+        if (TOKUDB_LIKELY(
+                TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+            error == DB_LOCK_NOTGRANTED) {
+            sql_print_error(
+                "Could not rename table from %s to %s because another transaction "
+                "has accessed the table. To rename the table, make sure no "
+                "transactions touch the table.",
+                from,
+                to);
+        }
     }
     TOKUDB_HANDLER_DBUG_RETURN(error);
 }
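Taken together, the two hunks above make the rename path check that the destination database directory exists before attempting the rename, and report HA_ERR_DEST_SCHEMA_NOT_EXIST instead of a confusing lock error when it does not. As a worked example of the helper's string handling, here is a hypothetical stand-in that slices the database name out of a table path such as "./new_db/t1" (the real code uses FN_LIBCHAR and check_db_dir_existence()):

#include <cassert>
#include <cstring>
#include <string>

// Hypothetical stand-in for tokudb_check_db_dir_exist_from_table_name():
// only the slicing between the first and last path separator is shown.
static std::string db_name_from_table_path(const char *table_name) {
    const char *begin = std::strchr(table_name, '/');  // first separator
    const char *end = std::strrchr(table_name, '/');   // last separator
    assert(begin && end && begin != end);
    ++begin;                                            // skip the separator
    return std::string(begin, end - begin);
}

// db_name_from_table_path("./new_db/t1") yields "new_db"; the caller would
// then verify that a "new_db" directory exists before doing the rename.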
storage/tokudb/ha_tokudb.h
@@ -816,6 +816,8 @@ class ha_tokudb : public handler {
     int index_first(uchar *buf);
     int index_last(uchar *buf);
 
+    bool has_gap_locks() const { return true; }
+
     int rnd_init(bool scan);
     int rnd_end();
     int rnd_next(uchar *buf);
storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
0 → 100644
SET GLOBAL tokudb_dir_per_db=true;
######
# Tokudb and mysql data dirs are the same, rename to existent db
###
CREATE DATABASE new_db;
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
ALTER TABLE test.t1 RENAME new_db.t1;
The content of "test" directory:
The content of "new_db" directory:
db.opt
t1.frm
t1_main_id.tokudb
t1_status_id.tokudb
DROP DATABASE new_db;
######
# Tokudb and mysql data dirs are the same, rename to nonexistent db
###
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
CALL mtr.add_suppression("because destination db does not exist");
ALTER TABLE test.t1 RENAME foo.t1;
ERROR HY000: Error on rename of './test/t1' to './foo/t1' (errno: 192 - Destination schema does not exist)
DROP TABLE t1;
SELECT @@tokudb_data_dir;
@@tokudb_data_dir
CUSTOM_TOKUDB_DATA_DIR
SELECT @@tokudb_dir_per_db;
@@tokudb_dir_per_db
1
######
# Tokudb and mysql data dirs are different, rename to existent db
###
CREATE DATABASE new_db;
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
ALTER TABLE test.t1 RENAME new_db.t1;
The content of "test" direcotry:
The content of "new_db" directory:
t1_main_id.tokudb
t1_status_id.tokudb
DROP DATABASE new_db;
######
# Tokudb and mysql data dirs are different, rename to nonexistent db
###
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
CALL mtr.add_suppression("because destination db does not exist");
ALTER TABLE test.t1 RENAME foo.t1;
ERROR HY000: Error on rename of './test/t1' to './foo/t1' (errno: 192 - Destination schema does not exist)
DROP TABLE t1;
SET GLOBAL tokudb_dir_per_db=default;
storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result
0 → 100644
storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result
0 → 100644
SET default_storage_engine=TokuDB;
#
# Test kill_idle_transaction_timeout feature with TokuDB
#
CREATE TABLE t1 (a INT);
SET GLOBAL kill_idle_transaction= 1;
BEGIN;
INSERT INTO t1 VALUES (1),(2);
COMMIT;
SELECT * FROM t1;
a
1
2
BEGIN;
INSERT INTO t1 VALUES (3);
# Current connection idle transaction killed, reconnecting
SELECT * FROM t1;
a
1
2
#
# Test that row locks are released on idle transaction kill
#
SET GLOBAL kill_idle_transaction= 2;
# Take row locks in connection conn1
BEGIN;
SELECT * FROM t1 FOR UPDATE;
a
1
2
# Take row locks in connection default
UPDATE t1 SET a=4;
SELECT * FROM t1;
a
4
4
# Show that connection conn1 has been killed
SELECT * FROM t1;
ERROR HY000: MySQL server has gone away
# connection default
# Cleanup
DROP TABLE t1;
SET GLOBAL kill_idle_transaction= saved_kill_idle_transaction;
storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test
0 → 100644
--source include/have_tokudb.inc

SET GLOBAL tokudb_dir_per_db=true;

--let DATADIR=`SELECT @@datadir`

--echo ######
--echo # Tokudb and mysql data dirs are the same, rename to existent db
--echo ###
CREATE DATABASE new_db;
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
ALTER TABLE test.t1 RENAME new_db.t1;
--echo The content of "test" directory:
--source include/table_files_replace_pattern.inc
--sorted_result
--list_files $DATADIR/test
--echo The content of "new_db" directory:
--source include/table_files_replace_pattern.inc
--sorted_result
--list_files $DATADIR/new_db
DROP DATABASE new_db;

--echo ######
--echo # Tokudb and mysql data dirs are the same, rename to nonexistent db
--echo ###
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
CALL mtr.add_suppression("because destination db does not exist");
--error ER_ERROR_ON_RENAME
ALTER TABLE test.t1 RENAME foo.t1;
DROP TABLE t1;

--let $custom_tokudb_data_dir=$MYSQL_TMP_DIR/custom_tokudb_data_dir
--mkdir $custom_tokudb_data_dir
--replace_result $custom_tokudb_data_dir CUSTOM_TOKUDB_DATA_DIR
--let $restart_parameters=restart:--loose-tokudb-data-dir=$custom_tokudb_data_dir --loose-tokudb-dir-per-db=true
--source include/restart_mysqld.inc

--replace_result $custom_tokudb_data_dir CUSTOM_TOKUDB_DATA_DIR
SELECT @@tokudb_data_dir;
SELECT @@tokudb_dir_per_db;

--echo ######
--echo # Tokudb and mysql data dirs are different, rename to existent db
--echo ###
CREATE DATABASE new_db;
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
ALTER TABLE test.t1 RENAME new_db.t1;
--echo The content of "test" direcotry:
--source include/table_files_replace_pattern.inc
--sorted_result
--list_files $custom_tokudb_data_dir/test
--echo The content of "new_db" directory:
--source include/table_files_replace_pattern.inc
--sorted_result
--list_files $custom_tokudb_data_dir/new_db
DROP DATABASE new_db;

--echo ######
--echo # Tokudb and mysql data dirs are different, rename to nonexistent db
--echo ###
CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
CALL mtr.add_suppression("because destination db does not exist");
--error ER_ERROR_ON_RENAME
ALTER TABLE test.t1 RENAME foo.t1;
DROP TABLE t1;

SET GLOBAL tokudb_dir_per_db=default;
storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test
0 → 100644
--source include/have_tokudb.inc

let $engine=tokudb;
let $expect_gap_lock_errors=0;

--source include/gap_lock_error_all.inc
storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test
0 → 100644
--source include/have_tokudb.inc

SET default_storage_engine=TokuDB;

--source include/percona_kill_idle_trx.inc
storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
-$TOKUDB_OPT $TOKUDB_LOAD_ADD $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
+$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
storage/tokudb/mysql-test/tokudb_rpl/r/rpl_extra_col_slave_tokudb.result
@@ -208,9 +208,10 @@ set @b1 = concat(@b1,@b1);
INSERT INTO t9 () VALUES(1,@b1,'Kyle'),(2,@b1,'JOE'),(3,@b1,'QA');
select * from t9;
a b c d e f g h i
-1	b1b1b1b1b1b1b1b1	Kyle	0000-00-00 00:00:00	0	NULL	NULL
-2	b1b1b1b1b1b1b1b1	JOE	0000-00-00 00:00:00	0	NULL	NULL
-3	b1b1b1b1b1b1b1b1	QA	0000-00-00 00:00:00	0	NULL	NULL
+1	b1b1b1b1b1b1b1b1	Kyle	CURRENT_TIMESTAMP	0	NULL	NULL
+2	b1b1b1b1b1b1b1b1	JOE	CURRENT_TIMESTAMP	0	NULL	NULL
+3	b1b1b1b1b1b1b1b1	QA	CURRENT_TIMESTAMP	0	NULL	NULL
+include/assert.inc [The values of column 'd' should have non-zero timetsamp.]
DROP TABLE t9;
*** Create t10 on slave ***
STOP SLAVE;