Commit eed6d215 authored by Sachin


MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes

Problem:

When we do a bulk insert with >= MI_MIN_ROWS_TO_DISABLE_INDEXES (100)
rows, we try to disable the indexes to speed up the insert. But the
current logic also disables the long unique indexes, whose per-row
uniqueness check needs an active index, so the insert fails.
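A minimal repro, modelled on the regression test added below (engine
and values are illustrative):

    CREATE TABLE t1 (a INT, UNIQUE (a) USING HASH) ENGINE=MyISAM;
    -- A multi-row INSERT of >= 100 rows takes the bulk-insert path,
    -- which used to deactivate the long hash index and fail.
    INSERT INTO t1 VALUES (1),(2),(3), /* ... */ (150);
    DROP TABLE t1;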

Solution: In ha_myisam::start_bulk_insert(), if we find a long hash
index (HA_KEY_ALG_LONG_HASH), we do not disable it, since its
uniqueness is enforced per row by check_duplicate_long_entries()
during ha_write_row().
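With the index kept active, a duplicate is still rejected even in the
bulk path; a sketch of the expected behaviour after the fix (values
illustrative):

    CREATE TABLE t1 (a INT, UNIQUE (a) USING HASH) ENGINE=MyISAM;
    INSERT INTO t1 VALUES (1),(2),(3), /* ... >= 100 rows ... */ (2);
    -- Fails with ER_DUP_ENTRY: the duplicate is caught per row by
    -- check_duplicate_long_entries() even during bulk insert.
    DROP TABLE t1;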

This commit also refactors mi_disable_indexes_for_rebuild(): since the
function is called from only one place, it is inlined into
start_bulk_insert().

mi_clear_key_active() is added to myisamdef.h because it is now also
used in ha_myisam.cc.

(The same is done for the Aria storage engine.)
parent b615d275
@@ -396,6 +396,7 @@ int maria_preload(MARIA_HA *info, ulonglong key_map, my_bool ignore_leaves);
void maria_versioning(MARIA_HA *info, my_bool versioning);
void maria_ignore_trids(MARIA_HA *info);
uint maria_max_key_length(void);
my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows);
#define maria_max_key_segments() HA_MAX_KEY_SEG
/* fulltext functions */
@@ -430,6 +430,7 @@ int sort_ft_buf_flush(MI_SORT_PARAM *sort_param);
int thr_write_keys(MI_SORT_PARAM *sort_param);
int sort_write_record(MI_SORT_PARAM *sort_param);
int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, ulonglong);
my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows);
#ifdef __cplusplus
}
@@ -267,3 +267,6 @@ connection default;
DROP TABLE t1, t2;
CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
ERROR 42000: Specified key was too long; max key length is 1000 bytes
create table t1(a int, unique(a) using hash);
#BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
drop table t1;
@@ -323,3 +323,20 @@ DROP TABLE t1, t2;
#
--error ER_TOO_LONG_KEY
CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria;
#
# MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes
#
create table t1(a int, unique(a) using hash);
--let $count=150
--let insert_stmt= insert into t1 values(200)
while ($count)
{
--let $insert_stmt=$insert_stmt,($count)
--dec $count
}
--disable_query_log
--echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES)
--eval $insert_stmt
--enable_query_log
drop table t1;
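(For reference, the while loop above expands to a single multi-row
statement of the form
    insert into t1 values(200),(150),(149), ... ,(2),(1);
so the server sees one INSERT with 151 rows.)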
@@ -2157,7 +2157,32 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
else
{
my_bool all_keys= MY_TEST(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
maria_disable_indexes_for_rebuild(file, rows, all_keys);
/*
Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
Note that 'rows' may be zero for the case when we don't know how many
rows we will put into the file.
*/
MARIA_SHARE *share= file->s;
MARIA_KEYDEF *key=share->keyinfo;
uint i;
DBUG_ASSERT(share->state.state.records == 0 &&
(!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) &&
! maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1 &&
(all_keys || !(key->flag & HA_NOSAME)) &&
table->key_info[i].algorithm != HA_KEY_ALG_LONG_HASH)
{
maria_clear_key_active(share->state.key_map, i);
file->update|= HA_STATE_CHANGED;
file->create_unique_index_by_sort= all_keys;
}
}
}
if (share->now_transactional)
{
@@ -6441,7 +6441,7 @@ static ha_checksum maria_byte_checksum(const uchar *buf, uint length)
return crc;
}
static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows)
my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows)
{
uint key_maxlength=key->maxlength;
if (key->flag & HA_FULLTEXT)
@@ -6456,38 +6456,6 @@ static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows)
(ulonglong) maria_max_temp_length));
}
/*
Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
Note that 'rows' may be zero for the case when we don't know how many
rows we will put into the file.
*/
void maria_disable_indexes_for_rebuild(MARIA_HA *info, ha_rows rows,
my_bool all_keys)
{
MARIA_SHARE *share= info->s;
MARIA_KEYDEF *key=share->keyinfo;
uint i;
DBUG_ASSERT(share->state.state.records == 0 &&
(!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) &&
! maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1 &&
(all_keys || !(key->flag & HA_NOSAME)))
{
maria_clear_key_active(share->state.key_map, i);
info->update|= HA_STATE_CHANGED;
info->create_unique_index_by_sort= all_keys;
}
}
}
/*
Return TRUE if we can use repair by sorting
One can set the force argument to force to use sorting
@@ -1750,7 +1750,35 @@ void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
else
{
my_bool all_keys= MY_TEST(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
mi_disable_indexes_for_rebuild(file, rows, all_keys);
MYISAM_SHARE *share=file->s;
MI_KEYDEF *key=share->keyinfo;
uint i;
/*
Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
Note that 'rows' may be zero for the case when we don't know how many
rows we will put into the file.
Long unique indexes (HA_KEY_ALG_LONG_HASH) are not disabled, because
their uniqueness is enforced at ha_write_row() time by
check_duplicate_long_entries(), so the index must stay active during
the insert.
*/
DBUG_ASSERT(file->state->records == 0 &&
(!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) &&
! mi_too_big_key_for_sort(key,rows) && file->s->base.auto_key != i+1 &&
(all_keys || !(key->flag & HA_NOSAME)) &&
table->key_info[i].algorithm != HA_KEY_ALG_LONG_HASH)
{
mi_clear_key_active(share->state.key_map, i);
file->update|= HA_STATE_CHANGED;
file->create_unique_index_by_sort= all_keys;
}
}
}
}
else
@@ -4667,7 +4667,7 @@ static ha_checksum mi_byte_checksum(const uchar *buf, uint length)
return crc;
}
static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
{
uint key_maxlength=key->maxlength;
if (key->flag & HA_FULLTEXT)
@@ -4681,38 +4681,6 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
((ulonglong) rows * key_maxlength > myisam_max_temp_length));
}
/*
Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
Note that 'rows' may be zero for the case when we don't know how many
rows we will put into the file.
*/
void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
my_bool all_keys)
{
MYISAM_SHARE *share=info->s;
MI_KEYDEF *key=share->keyinfo;
uint i;
DBUG_ASSERT(info->state->records == 0 &&
(!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) &&
! mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1 &&
(all_keys || !(key->flag & HA_NOSAME)))
{
mi_clear_key_active(share->state.key_map, i);
info->update|= HA_STATE_CHANGED;
info->create_unique_index_by_sort= all_keys;
}
}
}
/*
Return TRUE if we can use repair by sorting
One can set the force argument to force to use sorting
@@ -715,8 +715,6 @@ void mi_restore_status(void *param);
void mi_copy_status(void *to, void *from);
my_bool mi_check_status(void *param);
void mi_fix_status(MI_INFO *org_table, MI_INFO *new_table);
void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
my_bool all_keys);
extern MI_INFO *test_if_reopen(char *filename);
my_bool check_table_is_closed(const char *name, const char *where);
int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share);