Commit bd7f7b14, authored Feb 21, 2019 by Sergei Golubchik
MDEV-371 Unique Index for long columns
post-merge fixes
parent f6000782
Showing 11 changed files with 133 additions and 134 deletions
mysql-test/main/long_unique.result          +2  -2
mysql-test/main/long_unique.test            +5  -1
mysql-test/main/long_unique_debug.test      +5  -1
mysql-test/main/long_unique_innodb.result   +10 -0
mysql-test/main/long_unique_innodb.test     +8  -3
mysql-test/main/long_unique_update.test     +3  -0
mysql-test/main/long_unique_using_hash.test +3  -0
sql/share/errmsg-utf8.txt                   +2  -3
sql/sql_show.cc                             +2  -6
sql/sql_table.cc                            +73 -96
sql/table.cc                                +20 -22
mysql-test/main/long_unique.result
...
@@ -1387,13 +1387,13 @@ create table t1(a blob unique) partition by hash(a);
 ERROR HY000: A BLOB field is not allowed in partition function
 #key length > 2^16 -1
 create table t1(a blob, unique(a(65536)));
-ERROR HY000: Max key segment length is 65535
+ERROR 42000: Specified key part was too long; max key part length is 65535 bytes
 create table t1(a blob, unique(a(65535)));
 show create table t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (
   `a` blob DEFAULT NULL,
-  UNIQUE KEY `a` (`a`) USING HASH
+  UNIQUE KEY `a` (`a`(65535)) USING HASH
 ) ENGINE=MyISAM DEFAULT CHARSET=latin1
 drop table t1;
 #64 indexes
...
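For quick reference, the user-visible change captured by this result file, restated as plain SQL (statements and messages are taken from the updated .result above; annotations are mine):

    create table t1 (a blob, unique(a(65536)));
    -- now fails with: ERROR 42000: Specified key part was too long;
    -- max key part length is 65535 bytes
    -- (previously: ERROR HY000: Max key segment length is 65535)
    create table t1 (a blob, unique(a(65535)));
    show create table t1;   -- now prints the prefix: UNIQUE KEY `a` (`a`(65535)) USING HASH
    drop table t1;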
mysql-test/main/long_unique.test
+let datadir=`select @@datadir`;
 --source include/have_partition.inc
 #
 # MDEV-371 Unique indexes for blobs
 #
 --echo #Structure of tests
 --echo #First we will check all option for
 --echo #table containing single unique column
...
@@ -475,7 +479,7 @@ drop table t1;
 --error ER_BLOB_FIELD_IN_PART_FUNC_ERROR
 create table t1(a blob unique) partition by hash(a);
 --echo #key length > 2^16 -1
---error ER_TOO_LONG_HASH_KEYSEG
+--error ER_TOO_LONG_KEYPART
 create table t1(a blob, unique(a(65536)));
 create table t1(a blob, unique(a(65535)));
 show create table t1;
...
mysql-test/main/long_unique_debug.test
 --source include/have_debug.inc
 --source include/have_innodb.inc
 #
 # MDEV-371 Unique indexes for blobs
 #
 --echo #In this test case we will check what will happen in the case of hash collision
 SET debug_dbug="d,same_long_unique_hash";
...
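For context, the same_long_unique_hash debug point forces every computed hash to the same value, so this test exercises the collision path: on a hash hit the engine must fall back to comparing the actual column values. A minimal sketch of the scenario the test targets (table and values are illustrative, not from the diff):

    SET debug_dbug="d,same_long_unique_hash";   # force all long-unique hashes to collide
    create table t1 (a blob unique);
    insert into t1 values ('one');
    insert into t1 values ('two');   # same forced hash, different value: accepted after value compare
    --error ER_DUP_ENTRY
    insert into t1 values ('one');   # genuine duplicate: still rejected
    drop table t1;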
mysql-test/main/long_unique_innodb.result
...
@@ -3,6 +3,16 @@ insert into t1 values('RUC');
 insert into t1 values ('RUC');
 ERROR 23000: Duplicate entry 'RUC' for key 'a'
 drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` blob DEFAULT NULL,
+  `c` int(11) DEFAULT NULL,
+  UNIQUE KEY `a` (`a`) USING HASH,
+  UNIQUE KEY `c` (`c`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+drop table t1;
 #test for concurrent insert of long unique in innodb
 create table t1(a blob unique) engine= InnoDB;
 show create table t1;
...
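The added result block also documents that only the blob column needs the hash fallback; the unique constraint on the int column stays an ordinary index (statements from the new result block above; annotations are mine):

    create table t1 (a blob unique, c int unique) engine=innodb;
    show create table t1;
    -- UNIQUE KEY `a` (`a`) USING HASH,   <- blob: too long for a B-tree key, hash-backed
    -- UNIQUE KEY `c` (`c`)               <- int: plain unique index
    drop table t1;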
mysql-test/main/long_unique_innodb.test
 --source include/have_innodb.inc
 #
 # MDEV-371 Unique indexes for blobs
 #
 create table t1(a blob unique) engine= InnoDB;
 insert into t1 values('RUC');
 --error ER_DUP_ENTRY
 insert into t1 values ('RUC');
 drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+drop table t1;
 --echo #test for concurrent insert of long unique in innodb
 create table t1(a blob unique) engine= InnoDB;
 show create table t1;
...
@@ -33,7 +41,6 @@ insert into t1 values('RC');
 commit;
 set transaction isolation level READ COMMITTED;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('RC');
 commit;
...
@@ -47,7 +54,6 @@ insert into t1 values('RR');
 commit;
 set transaction isolation level REPEATABLE READ;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('RR');
...
@@ -60,7 +66,6 @@ insert into t1 values('S');
 commit;
 set transaction isolation level SERIALIZABLE;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('S');
 commit;
...
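The three hunks above drop ER_DUP_ENTRY from the expected-error lists: while the first insert is still uncommitted, a concurrent insert of the same value blocks on the lock guarding the unique hash entry and fails with a lock wait timeout, not a duplicate-key error. A two-connection sketch of the READ COMMITTED case (connection names and the shortened timeout are illustrative, not from the diff):

    connect (con1,localhost,root,,test);
    create table t1 (a blob unique) engine=InnoDB;
    start transaction;
    insert into t1 values ('RC');    # holds the lock, not yet committed

    connect (con2,localhost,root,,test);
    set innodb_lock_wait_timeout= 1;
    set transaction isolation level READ COMMITTED;
    start transaction;
    --error ER_LOCK_WAIT_TIMEOUT
    insert into t1 values ('RC');    # blocks on con1's lock, then times out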
mysql-test/main/long_unique_update.test
 #
 # MDEV-371 Unique indexes for blobs
 #
 --echo #structure of tests;
 --echo #1 test of table containing single unique blob column;
 --echo #2 test of table containing another unique int/ varchar etc column;
...
mysql-test/main/long_unique_using_hash.test
 #
 # MDEV-371 Unique indexes for blobs
 #
 create table t1(a blob, unique(a) using hash);
 --query_vertical show keys from t1
...
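Here unique(a) using hash spells out the representation that a unique over a blob gets implicitly, and the vertical SHOW KEYS output is how the test verifies it. A sketch of the check (my reading of what the vertical output is asserting):

    create table t1 (a blob, unique(a) using hash);
    --query_vertical show keys from t1
    # the row for key `a` should report Index_type: HASH,
    # i.e. the long-unique hash implementation was chosen
    drop table t1;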
sql/share/errmsg-utf8.txt
...
@@ -7950,8 +7950,7 @@ ER_PERIOD_NOT_FOUND
 eng "Period %`s is not found in table"
 ER_PERIOD_COLUMNS_UPDATED
 eng "Column %`s used in period %`s specified in update SET list"
 ER_PERIOD_CONSTRAINT_DROP
 eng "Can't DROP CONSTRAINT `%s`. Use DROP PERIOD `%s` for this"
-ER_TOO_LONG_HASH_KEYSEG
-eng "Max key segment length is 65535"
+ER_TOO_LONG_KEYPART 42000 S1009
+eng "Specified key part was too long; max key part length is %u bytes"

(The two tokens after the new error name are its SQLSTATE, 42000, and ODBC state, S1009; the %u placeholder is filled by the caller, and the updated call in sql/sql_table.cc below passes UINT16_MAX.)
sql/sql_show.cc
...
@@ -2352,9 +2352,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
     if (key_part->field &&
         (key_part->length !=
          table->field[key_part->fieldnr-1]->key_length() &&
-         !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))) &&
-        (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
-         key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+         !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))))
     {
       packet->append_parenthesized((long) key_part->length /
                                    key_part->field->charset()->mbmaxlen);
...
@@ -6644,9 +6642,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
       if (!(key_info->flags & HA_FULLTEXT) &&
           (key_part->field &&
            key_part->length !=
-           show_table->s->field[key_part->fieldnr-1]->key_length()) &&
-          (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
-           key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+           show_table->s->field[key_part->fieldnr-1]->key_length()))
       {
         table->field[10]->store((longlong) key_part->length /
                                 key_part->field->charset()->mbmaxlen, TRUE);
...
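Both hunks remove the HA_KEY_ALG_LONG_HASH special case from the "report a prefix length?" conditions, so hash-backed keys are handled like any other prefix key: SHOW CREATE TABLE (first hunk) prints the prefix, and INFORMATION_SCHEMA.STATISTICS (second hunk; table->field[10] is the SUB_PART column) reports it. A small probe, under the same assumptions as the test suite:

    create table t1 (a blob, unique(a(100)) using hash);
    select index_name, column_name, sub_part
      from information_schema.statistics
     where table_name = 't1';
    -- the key part over `a` should report sub_part = 100, matching
    -- the (`a`(100)) prefix that show create table now prints
    drop table t1;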
sql/sql_table.cc
...
@@ -2778,23 +2778,26 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
   This will make checking for duplicated keys faster and ensure that
   PRIMARY keys are prioritized.
-  This will not reorder LONG_HASH indexes, because they must match the
-  order of their LONG_UNIQUE_HASH_FIELD's.
 */

 static int sort_keys(KEY *a, KEY *b)
 {
   ulong a_flags= a->flags, b_flags= b->flags;

+  /*
+    Do not reorder LONG_HASH indexes, because they must match the order
+    of their LONG_UNIQUE_HASH_FIELD's.
+  */
+  if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
+      b->algorithm == HA_KEY_ALG_LONG_HASH)
+    return a->usable_key_parts - b->usable_key_parts;
+
   if (a_flags & HA_NOSAME)
   {
     if (!(b_flags & HA_NOSAME))
       return -1;
     if ((a_flags ^ b_flags) & HA_NULL_PART_KEY)
     {
-      if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
-          b->algorithm == HA_KEY_ALG_LONG_HASH)
-        return a->usable_key_parts - b->usable_key_parts;
       /* Sort NOT NULL keys before other keys */
       return (a_flags & HA_NULL_PART_KEY) ? 1 : -1;
     }
...
@@ -2817,9 +2820,7 @@ static int sort_keys(KEY *a, KEY *b)
     Prefer original key order. usable_key_parts contains here
     the original key position.
   */
-  return ((a->usable_key_parts < b->usable_key_parts) ? -1 :
-          (a->usable_key_parts > b->usable_key_parts) ? 1 : 0);
+  return a->usable_key_parts - b->usable_key_parts;
 }

 /*
...
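sort_keys() is the comparator used to reorder the KEY array at CREATE time (PRIMARY and NOT NULL unique keys first). Hoisting the LONG_HASH check to the top makes the no-reorder rule unconditional: two hash-backed keys always compare by their original position (usable_key_parts holds that position here), because key number N must stay aligned with its hidden hash column. A sketch of why, at the SQL level (hidden column names follow make_long_hash_field_name below):

    create table t1 (
      id int,
      a blob unique,    -- backed by invisible DB_ROW_HASH_1
      b text unique,    -- backed by invisible DB_ROW_HASH_2
      primary key (id)
    );
    -- sort_keys may move PRIMARY first, but must not swap the two
    -- hash-backed keys, or key 1 would read DB_ROW_HASH_2's data
    drop table t1;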
@@ -3302,6 +3303,7 @@ static inline void make_long_hash_field_name(LEX_CSTRING *buf, uint num)
   buf->length= my_snprintf((char *)buf->str,
                            LONG_HASH_FIELD_NAME_LENGTH, "DB_ROW_HASH_%u", num);
 }
+
 /**
   Add fully invisible hash field to table in case of long
   unique column
...
@@ -3313,7 +3315,6 @@ static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
                                      KEY *key_info)
 {
   List_iterator<Create_field> it(*create_list);
-  // CHARSET_INFO *field_cs;
   Create_field *dup_field, *cf= new (thd->mem_root) Create_field();
   cf->flags|= UNSIGNED_FLAG | LONG_UNIQUE_HASH_FIELD;
   cf->decimals= 0;
...
@@ -3336,18 +3337,6 @@ static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
       it.rewind();
     }
   }
-/* for (uint i= 0; i < key_info->user_defined_key_parts; i++)
-  {
-    dup_field= create_list->elem(key_info->key_part[i].fieldnr);
-    if (!i)
-      field_cs= dup_field->charset;
-    else if(field_cs != dup_field->charset)
-    {
-      my_error(ER_MULTIPLE_CS_HASH_KEY, MYF(0));
-      return NULL;
-    }
-  }
-  cf->charset= field_cs;*/
   cf->field_name= field_name;
   cf->set_handler(&type_handler_longlong);
   key_info->algorithm= HA_KEY_ALG_LONG_HASH;
...
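add_hash_field() is what creates that hidden column: an UNSIGNED longlong Create_field flagged LONG_UNIQUE_HASH_FIELD and named DB_ROW_HASH_<n>. The hunk above deletes a commented-out draft that would have rejected mixed-charset key columns via ER_MULTIPLE_CS_HASH_KEY. The column is fully invisible at the SQL level; a sketch (the engine line assumes the test suite's default MyISAM):

    create table t1 (a blob unique);
    show create table t1;
    -- t1	CREATE TABLE `t1` (
    --   `a` blob DEFAULT NULL,
    --   UNIQUE KEY `a` (`a`) USING HASH
    -- ) ENGINE=MyISAM DEFAULT CHARSET=latin1
    -- DB_ROW_HASH_1 backs the key but is not listed
    drop table t1;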
@@ -4095,7 +4084,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
       }
       else
       {
-        if (key->type == Key::UNIQUE)
+        if (key->type == Key::UNIQUE)
         {
           is_hash_field_needed= true;
         }
...
@@ -4107,19 +4096,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
         }
       }
     }
-      /* We can not store key_part_length more then 2^16 - 1 in frm
-         So we will simply make it zero */
-      if (is_hash_field_needed && column->length > (1<<16)-1)
+      /* We can not store key_part_length more then 2^16 - 1 in frm */
+      if (is_hash_field_needed && column->length > UINT16_MAX)
       {
-        my_error(ER_TOO_LONG_HASH_KEYSEG, MYF(0));
+        my_error(ER_TOO_LONG_KEYPART, MYF(0), UINT16_MAX);
         DBUG_RETURN(TRUE);
       }
       else
        key_part_info->length= (uint16) key_part_length;
-      if (is_hash_field_needed &&
-          (key_part_info->length == sql_field->char_length * sql_field->charset->mbmaxlen ||
-           key_part_info->length == (1<<16)-1))
-        key_part_info->length= 0;
      /* Use packed keys for long strings on the first column */
      if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
          !((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
...
@@ -8385,13 +8369,6 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
       if (cfield->field) // Not new field
       {
         /*
-          if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
-          {
-            Field *fld= cfield->field;
-            if (fld->max_display_length() == cfield->length*fld->charset()->mbmaxlen
-                && fld->max_data_length() != key_part->length)
-              cfield->length= cfield->char_length= key_part->length;
-          }
           If the field can't have only a part used in a key according to its
           new type, or should not be used partially according to its
           previous type, or the field length is less than the key part
...
sql/table.cc
...
@@ -2443,8 +2443,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
       {
         share->long_unique_table= 1;
-        if (share->frm_version < FRM_VER_EXPRESSSIONS)
-          share->frm_version= FRM_VER_EXPRESSSIONS;
         hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
         hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
         hash_keypart->store_length= hash_keypart->length;
...
@@ -2453,8 +2451,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         hash_keypart->key_type= 32834;
         /* Last n fields are unique_index_hash fields*/
         hash_keypart->offset= offset;
-        // hash_keypart->offset= share->reclength
-        //              - HA_HASH_FIELD_LENGTH*(share->fields - hash_field_used_no);
         hash_keypart->fieldnr= hash_field_used_no + 1;
         hash_field= share->field[hash_field_used_no];
         hash_field->flags|= LONG_UNIQUE_HASH_FIELD; //Used in parse_vcol_defs
...
@@ -2566,7 +2562,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         for (i= 0; i < keyinfo->user_defined_key_parts; i++)
         {
           uint fieldnr= keyinfo->key_part[i].fieldnr;
-          field= share->field[keyinfo->key_part[i].fieldnr-1];
+          field= share->field[fieldnr-1];
           if (field->null_ptr)
             len_null_byte= HA_KEY_NULL_LENGTH;
...
@@ -2581,8 +2577,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
           ext_key_length+= keyinfo->key_part[i].length + len_null_byte
                            + length_bytes;
-          if (share->field[fieldnr-1]->key_length() !=
-              keyinfo->key_part[i].length)
+          if (field->key_length() != keyinfo->key_part[i].length)
           {
             add_keyparts_for_this_key= 0;
             break;
...
@@ -4258,6 +4253,8 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
 {
   size_t key_comment_total_bytes= 0;
   uint i;
+  uchar frm_format= create_info->expression_length ? FRM_VER_EXPRESSSIONS
+                                                   : FRM_VER_TRUE_VARCHAR;
   DBUG_ENTER("prepare_frm_header");

   /* Fix this when we have new .frm files; Current limit is 4G rows (TODO) */
...
@@ -4266,17 +4263,6 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
   if (create_info->min_rows > UINT_MAX32)
     create_info->min_rows= UINT_MAX32;

-  size_t key_length, tmp_key_length, tmp, csid;
-  bzero((char*) fileinfo, FRM_HEADER_SIZE);
-  /* header */
-  fileinfo[0]= (uchar) 254;
-  fileinfo[1]= 1;
-  fileinfo[2]= (create_info->expression_length == 0 ? FRM_VER_TRUE_VARCHAR :
-                FRM_VER_EXPRESSSIONS);
-
-  DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
-  fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
-
   /*
     Keep in sync with pack_keys() in unireg.cc
     For each key:
...
@@ -4295,8 +4281,20 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
                  (key_info[i].comment.length > 0));
     if (key_info[i].flags & HA_USES_COMMENT)
       key_comment_total_bytes += 2 + key_info[i].comment.length;
+    if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+      frm_format= FRM_VER_EXPRESSSIONS;
   }
+
+  size_t key_length, tmp_key_length, tmp, csid;
+  bzero((char*) fileinfo, FRM_HEADER_SIZE);
+  /* header */
+  fileinfo[0]= (uchar) 254;
+  fileinfo[1]= 1;
+  fileinfo[2]= frm_format;
+
+  DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
+  fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
+
   key_length= keys * (8 + MAX_REF_PARTS * 9 + NAME_LEN + 1) + 16
               + key_comment_total_bytes;
...