nexedi / MariaDB / Commits / 935f78ac

Commit 935f78ac authored Sep 23, 2013 by Rich Prohaska

#92 add key_is_clustering accessor

parent f0629146
Showing 5 changed files with 24 additions and 20 deletions (+24 -20):

storage/tokudb/ha_tokudb.cc                 +14 -14
storage/tokudb/ha_tokudb.h                   +4  -0
storage/tokudb/ha_tokudb_alter_56.cc         +4  -4
storage/tokudb/ha_tokudb_alter_common.cc     +1  -1
storage/tokudb/ha_tokudb_update.cc           +1  -1
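The change is mechanical across all five files: every open-coded `flags & HA_CLUSTERING` test on a KEY is routed through the new key_is_clustering() accessor added to storage/tokudb/ha_tokudb.h. Below is a minimal, self-contained sketch of the before/after call-site pattern; the KEY struct and the HA_CLUSTERING value are illustrative stand-ins for the real server headers, not the actual definitions.

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for the server definitions (hypothetical values,
// not the real declarations from the MySQL/MariaDB headers).
static const uint32_t HA_CLUSTERING = 0x4000;
struct KEY { uint32_t flags; };

// Same shape as the accessor this commit adds to storage/tokudb/ha_tokudb.h.
static inline bool key_is_clustering(const KEY *key) {
    return key->flags & HA_CLUSTERING;
}

int main() {
    KEY clustered = { HA_CLUSTERING };
    KEY regular = { 0 };

    // Old call-site style: raw bitmask test.
    bool old_style = (clustered.flags & HA_CLUSTERING) != 0;

    // New call-site style: the same test behind a named accessor.
    bool new_style = key_is_clustering(&clustered);

    assert(old_style == new_style);
    assert(!key_is_clustering(&regular));
    return 0;
}

Compiling and running this with any C++ compiler should exit cleanly; the asserts only confirm that the accessor and the raw flag test agree.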
storage/tokudb/ha_tokudb.cc

@@ -408,7 +408,7 @@ ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
 #if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
     flags |= HA_DO_INDEX_COND_PUSHDOWN;
 #endif
-    if (table_share->key_info[idx].flags & HA_CLUSTERING) {
+    if (key_is_clustering(&table_share->key_info[idx])) {
         flags |= HA_CLUSTERED_INDEX;
     }
     DBUG_RETURN(flags);

@@ -1658,7 +1658,7 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K
             }
         }
     }
-    if (i == primary_key || table_share->key_info[i].flags & HA_CLUSTERING) {
+    if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
         error = initialize_col_pack_info(kc_info, table_share, i);
         if (error) {
             goto exit;

@@ -3817,7 +3817,7 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
         //
         // test key packing of clustering keys
         //
-        if (table->key_info[keynr].flags & HA_CLUSTERING) {
+        if (key_is_clustering(&table->key_info[keynr])) {
             error = pack_row(&row, (const uchar *) record, keynr);
             assert(error == 0);
             uchar* tmp_buff = NULL;

@@ -4444,7 +4444,7 @@ void ha_tokudb::set_query_columns(uint keynr) {
         key_index = primary_key;
     }
     else {
-        key_index = (table->key_info[keynr].flags & HA_CLUSTERING ? keynr : primary_key);
+        key_index = (key_is_clustering(&table->key_info[keynr]) ? keynr : primary_key);
     }
     for (uint i = 0; i < table_share->fields; i++) {
         if (bitmap_is_set(table->read_set, i) ||

@@ -4779,7 +4779,7 @@ int ha_tokudb::read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT con
     //
     // case where we read from secondary table that is not clustered
     //
-    if (keynr != primary_key && !(table->key_info[keynr].flags & HA_CLUSTERING)) {
+    if (keynr != primary_key && !key_is_clustering(&table->key_info[keynr])) {
         bool has_null;
         //
         // create a DBT that has the same data as row, this is inefficient

@@ -4993,7 +4993,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
         break;
     }
     error = handle_cursor_error(error, HA_ERR_KEY_NOT_FOUND, tokudb_active_index);
-    if (!error && !key_read && tokudb_active_index != primary_key && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
+    if (!error && !key_read && tokudb_active_index != primary_key && !key_is_clustering(&table->key_info[tokudb_active_index])) {
         error = read_full_row(buf);
     }

@@ -5398,7 +5398,7 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare) {
     // key
     need_val = (this->key_read == 0) &&
         (tokudb_active_index == primary_key ||
-         table->key_info[tokudb_active_index].flags & HA_CLUSTERING
+         key_is_clustering(&table->key_info[tokudb_active_index])
         );
     if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) {

@@ -5478,7 +5478,7 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare) {
     // main table.
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
         error = read_full_row(buf);
     }
     trx->stmt_progress.queried++;

@@ -5559,7 +5559,7 @@ int ha_tokudb::index_first(uchar * buf) {
     // still need to get entire contents of the row if operation done on
     // secondary DB and it was NOT a covering index
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
        error = read_full_row(buf);
     }
     trx->stmt_progress.queried++;

@@ -5601,7 +5601,7 @@ int ha_tokudb::index_last(uchar * buf) {
     // still need to get entire contents of the row if operation done on
     // secondary DB and it was NOT a covering index
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !key_is_clustering(&table->key_info[tokudb_active_index])) {
         error = read_full_row(buf);
     }

@@ -6754,7 +6754,7 @@ static uint32_t create_secondary_key_descriptor(
         form->s,
         kc_info,
         keynr,
-        key_info->flags & HA_CLUSTERING
+        key_is_clustering(key_info)
     );
     return ptr - buf;
 }

@@ -7342,7 +7342,7 @@ double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
 {
     TOKUDB_DBUG_ENTER("ha_tokudb::keyread_time");
     double ret_val;
-    if ((table->key_info[index].flags & HA_CLUSTERING) || (index == primary_key)) {
+    if (index == primary_key || key_is_clustering(&table->key_info[index])) {
         ret_val = read_time(index, ranges, rows);
         DBUG_RETURN(ret_val);
     }

@@ -7392,7 +7392,7 @@ double ha_tokudb::read_time(
         goto cleanup;
     }
-    is_clustering = (table->key_info[index].flags & HA_CLUSTERING);
+    is_clustering = key_is_clustering(&table->key_info[index]);
     //

@@ -7757,7 +7757,7 @@ int ha_tokudb::tokudb_add_index(
     curr_index = curr_num_DBs;
     *modified_DBs = true;
     for (uint i = 0; i < num_of_keys; i++, curr_index++) {
-        if (key_info[i].flags & HA_CLUSTERING) {
+        if (key_is_clustering(&key_info[i])) {
             set_key_filter(
                 &share->kc_info.key_filters[curr_index],
                 &key_info[i],
storage/tokudb/ha_tokudb.h

@@ -787,5 +787,9 @@ class ha_tokudb : public handler {
 #endif
 };

+static inline bool key_is_clustering(const KEY *key) {
+    return key->flags & HA_CLUSTERING;
+}
+
 #endif
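The accessor is defined static inline in the header, so every translation unit that includes ha_tokudb.h gets its own copy and call sites stay as cheap as the raw flag test while the check gains a name. As a usage sketch, the loop below mirrors the shape of clustering_keys_exist() in ha_tokudb_update.cc; the KEY struct, the HA_CLUSTERING value, and the count_clustering_keys() helper are hypothetical stand-ins, not part of the commit.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the server definitions.
static const uint32_t HA_CLUSTERING = 0x4000;
struct KEY { uint32_t flags; };

// The accessor as added by this commit.
static inline bool key_is_clustering(const KEY *key) {
    return key->flags & HA_CLUSTERING;
}

// Hypothetical helper: count clustering keys in a key array, the same loop
// shape clustering_keys_exist() uses to probe for a clustering secondary key.
static uint32_t count_clustering_keys(const KEY *key_info, uint32_t n_keys) {
    uint32_t count = 0;
    for (uint32_t i = 0; i < n_keys; i++) {
        if (key_is_clustering(&key_info[i]))
            count++;
    }
    return count;
}

int main() {
    KEY keys[3] = { { 0 }, { HA_CLUSTERING }, { HA_CLUSTERING } };
    printf("%u clustering keys\n", count_clustering_keys(keys, 3)); // prints: 2 clustering keys
    return 0;
}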
storage/tokudb/ha_tokudb_alter_56.cc

@@ -632,7 +632,7 @@ int ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplac
         if (error)
             goto cleanup;
-        if (i == primary_key || table_share->key_info[i].flags & HA_CLUSTERING) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             num_column_extra = fill_row_mutator(
                 column_extra,
                 columns,

@@ -757,7 +757,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_in
             break;
         // for all trees that have values, make an update variable offsets message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             uint32_t offset_start = table_share->null_bytes + share->kc_info.mcp_info[i].fixed_field_size;
             uint32_t offset_end = offset_start + share->kc_info.mcp_info[i].len_of_offsets;
             uint32_t number_of_offsets = offset_end - offset_start;

@@ -939,7 +939,7 @@ int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace
             break;
         // for all trees that have values, make an expand update message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             uint32_t old_offset = alter_table_field_offset(table_share->null_bytes, ctx->table_kc_info, i, expand_field_num);
             uint32_t new_offset = alter_table_field_offset(table_share->null_bytes, ctx->altered_table_kc_info, i, expand_field_num);
             assert(old_offset <= new_offset);

@@ -1018,7 +1018,7 @@ int ha_tokudb::alter_table_expand_blobs(TABLE *altered_table, Alter_inplace_info
             break;
         // for all trees that have values, make an update blobs message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || key_is_clustering(&table_share->key_info[i])) {
             tokudb::buffer b;
             uint8_t op = UPDATE_OP_EXPAND_BLOB;
             b.append(&op, sizeof op);
storage/tokudb/ha_tokudb_alter_common.cc

@@ -126,7 +126,7 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
             retval = false;
             goto cleanup;
         }
-        if (((curr_orig_key->flags & HA_CLUSTERING) == 0) != ((curr_altered_key->flags & HA_CLUSTERING) == 0)) {
+        if (key_is_clustering(curr_orig_key) != key_is_clustering(curr_altered_key)) {
             if (print_error) {
                 sql_print_error(
                     "keys disagree on if they are clustering, %d, %d",
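One hunk gains more than a name: in tables_have_same_keys() the old code normalized each bitmask test with "== 0" before comparing the two keys, while the bool-returning accessor lets the results be compared with "!=" directly. A small self-contained check that the two forms agree follows; the KEY struct and HA_CLUSTERING value are hypothetical stand-ins, as in the earlier sketches.

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for the server definitions.
static const uint32_t HA_CLUSTERING = 0x4000;
struct KEY { uint32_t flags; };

static inline bool key_is_clustering(const KEY *key) {
    return key->flags & HA_CLUSTERING;
}

int main() {
    KEY a = { HA_CLUSTERING };
    KEY b = { 0 };

    // Old form: each bitmask test normalized with "== 0" before the comparison.
    bool old_disagree = ((a.flags & HA_CLUSTERING) == 0) != ((b.flags & HA_CLUSTERING) == 0);

    // New form: bool results compared directly.
    bool new_disagree = key_is_clustering(&a) != key_is_clustering(&b);

    assert(old_disagree == new_disagree); // both report that the keys disagree
    return 0;
}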
storage/tokudb/ha_tokudb_update.cc

@@ -538,7 +538,7 @@ static bool check_point_update(Item *conds, TABLE *table) {
 // Precompute this when the table is opened.
 static bool clustering_keys_exist(TABLE *table) {
     for (uint keynr = 0; keynr < table->s->keys; keynr++) {
-        if (keynr != table->s->primary_key && (table->s->key_info[keynr].flags & HA_CLUSTERING))
+        if (keynr != table->s->primary_key && key_is_clustering(&table->s->key_info[keynr]))
             return true;
     }
     return false;