Commit 6b80cd91 authored by Magne Mahre

Bug #37433  Deadlock between open_table, close_open_tables, get_table_share, drop_open_table
            
In the partition handler code, LOCK_open and share->LOCK_ha_data
are acquired in the wrong order in certain cases.  When doing a
multi-row INSERT (i.e. an INSERT..SELECT) into a table with auto-
increment column(s), the increments must form a monotonically
increasing sequence with no gaps (i.e. it cannot have "holes").
To achieve this, a lock is held for the duration of the operation;
share->LOCK_ha_data was used for this purpose.
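
The requirement can be illustrated in isolation with a stand-alone
sketch (this is not server code; statement_lock, next_auto_inc_val and
run_statement are stand-in names): if each statement holds a single
lock for its whole batch of rows, every statement gets a consecutive,
hole-free range of values even when several threads insert
concurrently.

#include <pthread.h>
#include <stdio.h>

#define ROWS_PER_STMT 5

static pthread_mutex_t statement_lock= PTHREAD_MUTEX_INITIALIZER;
static unsigned long long next_auto_inc_val= 1;

/* One simulated multi-row INSERT: reserve ROWS_PER_STMT values while
   holding the lock for the whole statement, so the range has no holes. */
static void *run_statement(void *arg)
{
  unsigned long long values[ROWS_PER_STMT];
  int i;

  pthread_mutex_lock(&statement_lock);       /* held for the whole statement */
  for (i= 0; i < ROWS_PER_STMT; i++)
    values[i]= next_auto_inc_val++;
  pthread_mutex_unlock(&statement_lock);

  printf("statement %s got %llu..%llu (consecutive)\n",
         (const char*) arg, values[0], values[ROWS_PER_STMT - 1]);
  return NULL;
}

int main(void)
{
  pthread_t t1, t2;
  pthread_create(&t1, NULL, run_statement, (void*) "A");
  pthread_create(&t2, NULL, run_statement, (void*) "B");
  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  return 0;
}

Built with -pthread, each simulated statement prints a consecutive
range, which is the property the partition handler has to guarantee.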
            
Whenever a view had to be opened _during_ the operation
(views are not currently pre-opened the way tables are) and
LOCK_open was grabbed, a deadlock could occur, because
share->LOCK_ha_data is elsewhere acquired _while_ LOCK_open is
already held.
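
The failure is a classic lock-order inversion.  The stand-alone sketch
below (again not server code; lock_open and lock_ha_data are stand-in
mutexes named only for illustration) shows how two threads taking the
same pair of locks in opposite order can block each other forever:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_open=    PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_ha_data= PTHREAD_MUTEX_INITIALIZER;

/* Mimics the INSERT..SELECT path: the auto-increment lock is held for
   the whole statement, then LOCK_open is needed to open a view. */
static void *insert_select_path(void *arg)
{
  (void) arg;
  pthread_mutex_lock(&lock_ha_data);
  sleep(1);                                /* let the other thread run */
  pthread_mutex_lock(&lock_open);
  puts("insert path got both locks");
  pthread_mutex_unlock(&lock_open);
  pthread_mutex_unlock(&lock_ha_data);
  return NULL;
}

/* Mimics a concurrent open_table/get_table_share path: LOCK_open is
   held first, then the share's ha_data lock is taken. */
static void *open_table_path(void *arg)
{
  (void) arg;
  pthread_mutex_lock(&lock_open);
  sleep(1);
  pthread_mutex_lock(&lock_ha_data);
  puts("open path got both locks");
  pthread_mutex_unlock(&lock_ha_data);
  pthread_mutex_unlock(&lock_open);
  return NULL;
}

int main(void)
{
  pthread_t a, b;
  pthread_create(&a, NULL, insert_select_path, NULL);
  pthread_create(&b, NULL, open_table_path, NULL);
  pthread_join(a, NULL);
  pthread_join(b, NULL);
  return 0;
}

With the sleeps forcing the interleaving, the program normally hangs,
which is the situation reported in the bug.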
            
A new mutex is introduced in the HA_DATA_PARTITION structure,
reserved exclusively for the auto-increment data fields, so there
is no longer any need to overload LOCK_ha_data for this purpose.
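
A minimal sketch of the resulting locking pattern is given below.  The
names (HA_DATA_PARTITION_SKETCH, ha_data_init, reserve_auto_inc,
ha_data_free) are illustrative and are not the server's identifiers;
the point is that the per-share mutex is created once, protects
nothing but the auto-increment fields, and is never taken by the
open_table/get_table_share code paths, so it cannot re-create the
LOCK_open ordering problem:

#include <pthread.h>
#include <stdio.h>

typedef struct st_ha_data_partition_sketch
{
  unsigned long long next_auto_inc_val;    /* first non-reserved value */
  int auto_inc_initialized;
  pthread_mutex_t mutex;                   /* protects the fields above */
} HA_DATA_PARTITION_SKETCH;

/* Called once when the shared handler data is created. */
static void ha_data_init(HA_DATA_PARTITION_SKETCH *ha_data)
{
  ha_data->next_auto_inc_val= 1;
  ha_data->auto_inc_initialized= 1;
  pthread_mutex_init(&ha_data->mutex, NULL);
}

/* Reserve 'count' consecutive auto-increment values. */
static unsigned long long
reserve_auto_inc(HA_DATA_PARTITION_SKETCH *ha_data, unsigned long long count)
{
  unsigned long long first;
  pthread_mutex_lock(&ha_data->mutex);
  first= ha_data->next_auto_inc_val;
  ha_data->next_auto_inc_val+= count;
  pthread_mutex_unlock(&ha_data->mutex);
  return first;
}

/* Mirrors the destructor registered via ha_data_destroy in the diff. */
static void ha_data_free(HA_DATA_PARTITION_SKETCH *ha_data)
{
  pthread_mutex_destroy(&ha_data->mutex);
}

int main(void)
{
  HA_DATA_PARTITION_SKETCH ha_data;
  ha_data_init(&ha_data);
  printf("first range starts at %llu\n", reserve_auto_inc(&ha_data, 10));
  printf("next range starts at %llu\n", reserve_auto_inc(&ha_data, 10));
  ha_data_free(&ha_data);
  return 0;
}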
            
A module test case has not been supplied, since the problem is the
result of a race condition and can therefore not be tested for
deterministically.  The deadlock can be reproduced by setting up a
test case as described in the bug report.
parent ebb0b02c
...
@@ -2452,6 +2452,21 @@ err1:
/****************************************************************************
                MODULE open/close object
****************************************************************************/
/**
  A destructor for partition-specific TABLE_SHARE data.
*/

void ha_data_partition_destroy(void *ha_data)
{
  if (ha_data)
  {
    HA_DATA_PARTITION *ha_data_partition= (HA_DATA_PARTITION*) ha_data;
    pthread_mutex_destroy(&ha_data_partition->mutex);
  }
}
/*
  Open handler object
...
@@ -2608,6 +2623,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
    }
    DBUG_PRINT("info", ("table_share->ha_data 0x%p", ha_data));
    bzero(ha_data, sizeof(HA_DATA_PARTITION));
    table_share->ha_data_destroy= ha_data_partition_destroy;
    pthread_mutex_init(&ha_data->mutex, MY_MUTEX_INIT_FAST);
  }
  if (is_not_tmp_table)
    pthread_mutex_unlock(&table_share->mutex);
...
...
@@ -45,6 +45,7 @@ typedef struct st_ha_data_partition
{
  ulonglong next_auto_inc_val;             /**< first non reserved value */
  bool auto_inc_initialized;
  pthread_mutex_t mutex;
} HA_DATA_PARTITION;
#define PARTITION_BYTES_IN_POS 2
...
...
@@ -1601,6 +1601,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
  delete crypted;
  delete handler_file;
  my_hash_free(&share->name_hash);
  if (share->ha_data_destroy)
    share->ha_data_destroy(share->ha_data);
  open_table_error(share, error, share->open_errno, errarg);
  DBUG_RETURN(error);
...
...
@@ -419,6 +419,7 @@ struct TABLE_SHARE
  /** place to store storage engine specific data */
  void *ha_data;
  void (*ha_data_destroy)(void *);         /* An optional destructor for ha_data */
  /*
...