Commit bd7f7b14 authored by Sergei Golubchik

MDEV-371 Unique Index for long columns

post-merge fixes
parent f6000782
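MDEV-371 makes UNIQUE usable on columns that exceed the engine's key-length limits (BLOB/TEXT, long VARCHAR) by indexing a hash of the value (USING HASH) instead of the value itself; on a hash match the actual values are compared. A minimal illustration, assuming a server with the feature merged:

  create table t1 (a blob unique);
  show create table t1;           -- UNIQUE KEY `a` (`a`) USING HASH
  insert into t1 values ('RUC');
  insert into t1 values ('RUC');  -- ERROR 23000: Duplicate entry 'RUC' for key 'a'

The hunks below are the post-merge cleanup: test and error-message fixes, SHOW CREATE TABLE and information_schema output, and .frm format selection.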
@@ -1387,13 +1387,13 @@ create table t1(a blob unique) partition by hash(a);
 ERROR HY000: A BLOB field is not allowed in partition function
 #key length > 2^16 -1
 create table t1(a blob, unique(a(65536)));
-ERROR HY000: Max key segment length is 65535
+ERROR 42000: Specified key part was too long; max key part length is 65535 bytes
 create table t1(a blob, unique(a(65535)));
 show create table t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (
   `a` blob DEFAULT NULL,
-  UNIQUE KEY `a` (`a`) USING HASH
+  UNIQUE KEY `a` (`a`(65535)) USING HASH
 ) ENGINE=MyISAM DEFAULT CHARSET=latin1
 drop table t1;
 #64 indexes
#64 indexes
......
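Two post-merge fixes show up in the result file above: exceeding the key-part limit now raises the standard ER_TOO_LONG_KEYPART (SQLSTATE 42000) rather than the interim ER_TOO_LONG_HASH_KEYSEG, and SHOW CREATE TABLE prints the explicit prefix length of a hash key, so the statement it prints recreates the same table:

  create table t1 (a blob, unique(a(65535)));
  show create table t1;  -- UNIQUE KEY `a` (`a`(65535)) USING HASH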
let datadir=`select @@datadir`;
--source include/have_partition.inc
#
# MDEV-371 Unique indexes for blobs
#
--echo #Structure of tests
--echo #First we will check all option for
--echo #table containing single unique column
@@ -475,7 +479,7 @@ drop table t1;
 --error ER_BLOB_FIELD_IN_PART_FUNC_ERROR
 create table t1(a blob unique) partition by hash(a);
 --echo #key length > 2^16 -1
---error ER_TOO_LONG_HASH_KEYSEG
+--error ER_TOO_LONG_KEYPART
 create table t1(a blob, unique(a(65536)));
 create table t1(a blob, unique(a(65535)));
 show create table t1;
......
--source include/have_debug.inc
--source include/have_innodb.inc
#
# MDEV-371 Unique indexes for blobs
#
--echo #In this test case we will check what will happen in the case of hash collision
SET debug_dbug="d,same_long_unique_hash";
......
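The d,same_long_unique_hash debug injection makes every row hash to the same value, forcing the duplicate check to fall back to comparing the actual column values. A sketch of what this test exercises (the values are illustrative; the excerpt cuts the file off):

  set debug_dbug="d,same_long_unique_hash";
  create table t1 (a blob unique);
  insert into t1 values ('one');
  insert into t1 values ('two');   # colliding hash, different value: accepted
  --error ER_DUP_ENTRY
  insert into t1 values ('one');   # colliding hash, same value: rejected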
@@ -3,6 +3,16 @@ insert into t1 values('RUC');
 insert into t1 values ('RUC');
 ERROR 23000: Duplicate entry 'RUC' for key 'a'
 drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` blob DEFAULT NULL,
+  `c` int(11) DEFAULT NULL,
+  UNIQUE KEY `a` (`a`) USING HASH,
+  UNIQUE KEY `c` (`c`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+drop table t1;
 #test for concurrent insert of long unique in innodb
 create table t1(a blob unique) engine= InnoDB;
 show create table t1;
......
 --source include/have_innodb.inc
 #
 # MDEV-371 Unique indexes for blobs
 #
 create table t1(a blob unique) engine= InnoDB;
 insert into t1 values('RUC');
 --error ER_DUP_ENTRY
 insert into t1 values ('RUC');
 drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+drop table t1;
 --echo #test for concurrent insert of long unique in innodb
 create table t1(a blob unique) engine= InnoDB;
 show create table t1;
@@ -33,7 +41,6 @@ insert into t1 values('RC');
 commit;
 set transaction isolation level READ COMMITTED;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('RC');
 commit;
@@ -47,7 +54,6 @@ insert into t1 values('RR');
 commit;
 set transaction isolation level REPEATABLE READ;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('RR');
@@ -60,7 +66,6 @@ insert into t1 values('S');
 commit;
 set transaction isolation level SERIALIZABLE;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('S');
 commit;
......
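All three isolation-level hunks drop the leftover ER_DUP_ENTRY expectation: the conflicting insert in the other connection is still uncommitted, so the second INSERT blocks on the InnoDB row lock and fails with ER_LOCK_WAIT_TIMEOUT, never with a duplicate-key error. The excerpt hides the connection switches; the shape of each test is roughly this (sketch, assuming a short innodb_lock_wait_timeout is set earlier in the test and con1/con2 are illustrative connection names):

  --connection con1
  start transaction;
  insert into t1 values ('RC');
  --connection con2
  set transaction isolation level READ COMMITTED;
  start transaction;
  --error ER_LOCK_WAIT_TIMEOUT
  insert into t1 values ('RC');
  --connection con1
  commit;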
#
# MDEV-371 Unique indexes for blobs
#
--echo #structure of tests;
--echo #1 test of table containing single unique blob column;
--echo #2 test of table containing another unique int/ varchar etc column;
......
#
# MDEV-371 Unique indexes for blobs
#
create table t1(a blob , unique(a) using hash);
--query_vertical show keys from t1;
......
@@ -7950,8 +7950,7 @@ ER_PERIOD_NOT_FOUND
         eng "Period %`s is not found in table"
 ER_PERIOD_COLUMNS_UPDATED
         eng "Column %`s used in period %`s specified in update SET list"
 ER_PERIOD_CONSTRAINT_DROP
         eng "Can't DROP CONSTRAINT `%s`. Use DROP PERIOD `%s` for this"
-ER_TOO_LONG_HASH_KEYSEG
-        eng "Max key segment length is 65535"
+ER_TOO_LONG_KEYPART 42000 S1009
+        eng "Specified key part was too long; max key part length is %u bytes"
@@ -2352,9 +2352,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
     if (key_part->field &&
         (key_part->length !=
          table->field[key_part->fieldnr-1]->key_length() &&
-         !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))) &&
-        (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
-         key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+         !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))))
     {
       packet->append_parenthesized((long) key_part->length /
                                    key_part->field->charset()->mbmaxlen);
@@ -6644,9 +6642,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
         if (!(key_info->flags & HA_FULLTEXT) &&
             (key_part->field &&
              key_part->length !=
-             show_table->s->field[key_part->fieldnr-1]->key_length()) &&
-            (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
-             key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+             show_table->s->field[key_part->fieldnr-1]->key_length()))
         {
           table->field[10]->store((longlong) key_part->length /
                                   key_part->field->charset()->mbmaxlen, TRUE);
......
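Both hunks delete the HA_KEY_ALG_LONG_HASH special case: after the merge fixes a long-hash key part carries its real prefix length, so the generic test key_part->length != field->key_length() already decides whether to print a prefix in SHOW CREATE TABLE or report a SUB_PART in the statistics view. A check one might run (sketch, assuming the 65535-prefix table from the tests above):

  create table t1 (a blob, unique(a(65535)));
  select index_name, sub_part from information_schema.statistics
    where table_name = 't1';  -- expected: a | 65535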
@@ -1177,10 +1177,10 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
   /* Now, initialize CURRENT_TIMESTAMP and UNIQUE_INDEX_HASH_FIELD fields */
   for (field_ptr= table->field; *field_ptr; field_ptr++)
-    {
+  {
     Field *field= *field_ptr;
     if (field->flags & LONG_UNIQUE_HASH_FIELD)
-      {
+    {
       List<Item> *field_list= new (mem_root) List<Item>();
       Item *list_item;
       KEY *key;
@@ -2443,8 +2443,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
       {
         share->long_unique_table= 1;
-        if (share->frm_version < FRM_VER_EXPRESSSIONS)
-          share->frm_version= FRM_VER_EXPRESSSIONS;
         hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
         hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
         hash_keypart->store_length= hash_keypart->length;
@@ -2453,8 +2451,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         hash_keypart->key_type= 32834;
         /* Last n fields are unique_index_hash fields*/
         hash_keypart->offset= offset;
-        // hash_keypart->offset= share->reclength
-        //                    - HA_HASH_FIELD_LENGTH*(share->fields - hash_field_used_no);
         hash_keypart->fieldnr= hash_field_used_no + 1;
         hash_field= share->field[hash_field_used_no];
         hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
@@ -2472,7 +2468,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
     KEY* key_first_info= NULL;
     if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME &&
-         keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
+        keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
     {
       /*
        If the UNIQUE key doesn't have NULL columns and is not a part key
@@ -2507,7 +2503,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
     }
     if (share->use_ext_keys)
-      {
+    {
       if (primary_key >= MAX_KEY)
       {
         add_first_key_parts= 0;
@@ -2566,7 +2562,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       for (i= 0; i < keyinfo->user_defined_key_parts; i++)
       {
         uint fieldnr= keyinfo->key_part[i].fieldnr;
-        field= share->field[keyinfo->key_part[i].fieldnr-1];
+        field= share->field[fieldnr-1];
         if (field->null_ptr)
           len_null_byte= HA_KEY_NULL_LENGTH;
@@ -2581,8 +2577,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         ext_key_length+= keyinfo->key_part[i].length + len_null_byte
                          + length_bytes;
-        if (share->field[fieldnr-1]->key_length() !=
-            keyinfo->key_part[i].length)
+        if (field->key_length() != keyinfo->key_part[i].length)
         {
           add_keyparts_for_this_key= 0;
           break;
@@ -4258,6 +4253,8 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
 {
   size_t key_comment_total_bytes= 0;
   uint i;
+  uchar frm_format= create_info->expression_length ? FRM_VER_EXPRESSSIONS
+                                                   : FRM_VER_TRUE_VARCHAR;
   DBUG_ENTER("prepare_frm_header");
   /* Fix this when we have new .frm files; Current limit is 4G rows (TODO) */
@@ -4266,17 +4263,6 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
   if (create_info->min_rows > UINT_MAX32)
     create_info->min_rows= UINT_MAX32;
-  size_t key_length, tmp_key_length, tmp, csid;
-  bzero((char*) fileinfo, FRM_HEADER_SIZE);
-  /* header */
-  fileinfo[0]=(uchar) 254;
-  fileinfo[1]= 1;
-  fileinfo[2]= (create_info->expression_length == 0 ? FRM_VER_TRUE_VARCHAR :
-                FRM_VER_EXPRESSSIONS);
-  DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
-  fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
   /*
     Keep in sync with pack_keys() in unireg.cc
     For each key:
@@ -4295,8 +4281,20 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
                                      (key_info[i].comment.length > 0));
     if (key_info[i].flags & HA_USES_COMMENT)
       key_comment_total_bytes += 2 + key_info[i].comment.length;
+    if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+      frm_format= FRM_VER_EXPRESSSIONS;
   }
+  size_t key_length, tmp_key_length, tmp, csid;
+  bzero((char*) fileinfo, FRM_HEADER_SIZE);
+  /* header */
+  fileinfo[0]=(uchar) 254;
+  fileinfo[1]= 1;
+  fileinfo[2]= frm_format;
+  DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
+  fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
   key_length= keys * (8 + MAX_REF_PARTS * 9 + NAME_LEN + 1) + 16
               + key_comment_total_bytes;
......
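The prepare_frm_header() hunks make the .frm format a function of the whole table definition: frm_format starts from create_info->expression_length as before, but the key-scanning loop now upgrades it to FRM_VER_EXPRESSSIONS whenever a key uses HA_KEY_ALG_LONG_HASH, and only then are the header bytes written. The hidden hash column behind a long unique key goes through the virtual-column machinery (see the parse_vcol_defs hunk above), so the expressions-capable format is needed even for a table whose DDL declares no generated columns, e.g.:

  create table t1 (a text, unique(a));  -- no virtual columns in the DDL,
                                        -- yet the .frm needs FRM_VER_EXPRESSSIONS
                                        -- for the hidden hash field

This also let init_from_binary_frm_image() stop bumping share->frm_version after the fact, as removed in the table.cc hunk above.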