Commit 7886d63c authored by Timothy Smith

Apply 3 patches from innodb-5.0-ss2637.

This fixes Bug#36149: Read buffer overflow in srv0start.c found during
"make test"

Per-revision comments:

r2484 | vasil | 2008-05-28 15:32:48 +0300 (Wed, 28 May 2008) | 9 lines

Fix Bug#36149 Read buffer overflow in srv0start.c found during "make test"

Use strncmp(3) instead of memcmp(3) to avoid reading past end of the string
if it is empty (*str == '\0'). This bug is _not_ a buffer overflow.

Discussed with:	Sunny (via IM)
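
For illustration, here is a minimal sketch (not part of the patch) of why
memcmp(3) can over-read in this situation while strncmp(3) cannot:
memcmp() is defined over the full byte count it is given (both buffers
must be at least that long), so with an empty string it can touch bytes
past the terminating NUL, whereas strncmp() stops at the first NUL or
differing byte.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char*	str = "";	/* e.g. the data file spec fully consumed */

	/* memcmp(str, ":autoextend", (sizeof ":autoextend") - 1) may read
	all 11 bytes, 10 of them past the NUL of the empty string. */

	/* strncmp() stops at the first NUL or mismatch, so it reads only
	the single valid byte here before returning nonzero. */
	if (0 == strncmp(str, ":autoextend", (sizeof ":autoextend") - 1)) {
		puts(":autoextend suffix found");
	} else {
		puts("no :autoextend suffix");
	}

	return(0);
}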

r2538 | inaam | 2008-07-15 21:24:02 +0300 (Tue, 15 Jul 2008) | 15 lines

Fix of issue# 4

Fixed a timing hole where a thread dropping an index could free the
in-memory index struct while another thread was still using that
struct to remove adaptive hash index entries belonging to one of the
pages of the index being dropped.

The fix is to add a reference counter to the index struct and to
wait for this counter to drop to zero before freeing the struct.

Reviewed by: Heikki
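
As a rough sketch of this scheme (simplified and not InnoDB's code: a
pthread mutex stands in for btr_search_latch, usleep() for
os_thread_sleep(), and all names are illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

typedef struct {
	unsigned long	ref_count;	/* pages whose hash entries point to this index */
	pthread_mutex_t	latch;		/* stands in for btr_search_latch */
} search_info_t;

static search_info_t*
info_create(void)
{
	search_info_t*	info = malloc(sizeof(*info));

	info->ref_count = 0;
	pthread_mutex_init(&info->latch, NULL);
	return(info);
}

/* Called when the hash index is built for a block that is not yet hashed. */
static void
info_ref_inc(search_info_t* info)
{
	pthread_mutex_lock(&info->latch);
	info->ref_count++;
	pthread_mutex_unlock(&info->latch);
}

/* Called after all hash entries of a hashed page have been removed. */
static void
info_ref_dec(search_info_t* info)
{
	pthread_mutex_lock(&info->latch);
	info->ref_count--;
	pthread_mutex_unlock(&info->latch);
}

static unsigned long
info_ref_get(search_info_t* info)
{
	unsigned long	n;

	pthread_mutex_lock(&info->latch);
	n = info->ref_count;
	pthread_mutex_unlock(&info->latch);
	return(n);
}

/* The thread dropping the index polls until no page references the index
any more; only then is it safe to free the in-memory struct. */
static void
index_remove(search_info_t* info)
{
	while (info_ref_get(info) > 0) {
		usleep(10000);	/* 10 ms between polls, as in the patch */
	}

	pthread_mutex_destroy(&info->latch);
	free(info);
}

int
main(void)
{
	search_info_t*	info = info_create();

	info_ref_inc(info);	/* a page builds its hash index */
	info_ref_dec(info);	/* ...and later drops its entries */
	index_remove(info);	/* returns at once: ref_count is 0 */

	return(0);
}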


r2544 | inaam | 2008-07-22 18:58:11 +0300 (Tue, 22 Jul 2008) | 8 lines

Removed UNIV_INLINE qualifier from btr_search_info_get_ref_count().
Otherwise compilation failed on non-debug builds.

Pointed by: Vasil
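
The reason, as far as it can be reconstructed here (the expansion of
UNIV_INLINE is an assumption, not taken from this commit): in optimized,
non-debug builds UNIV_INLINE typically expands to a static inline
qualifier, so a UNIV_INLINE function defined in a .c file gets internal
linkage and cannot be referenced from another compilation unit such as
dict0dict.c. A schematic two-file example with hypothetical names:

/* file 1 (hypothetical): ref.c -- imagine UNIV_INLINE expanding to
"static __inline__" in an optimized build. */
static unsigned long
get_ref_count(void)
{
	return(42);
}

/* file 2 (hypothetical): caller.c -- uses the header-style external
declaration. */
unsigned long	get_ref_count(void);	/* as a header would declare it */

unsigned long
use_it(void)
{
	/* Each file compiles on its own, but linking the two objects
	fails with an undefined reference: ref.c only provides an
	internal (static) definition. Removing the inline qualifier
	from the definition gives it external linkage, which is what
	the patch does for btr_search_info_get_ref_count(). */
	return(get_ref_count());
}
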
parent ae75d953
@@ -162,6 +162,8 @@ btr_search_info_create(
 	info->last_search = NULL;
 	info->n_direction = 0;
 
+	info->ref_count = 0;
+
 	info->root_guess = NULL;
 	info->hash_analysis = 0;
@@ -183,6 +185,31 @@ btr_search_info_create(
 	return(info);
 }
 
+/*********************************************************************
+Returns the value of ref_count. The value is protected by
+btr_search_latch. */
+
+ulint
+btr_search_info_get_ref_count(
+/*==========================*/
+			/* out: ref_count value. */
+	btr_search_t*	info)	/* in: search info. */
+{
+	ulint	ret;
+
+	ut_ad(info);
+
+#ifdef UNIV_SYNC_DEBUG
+	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
+	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+	rw_lock_s_lock(&btr_search_latch);
+	ret = info->ref_count;
+	rw_lock_s_unlock(&btr_search_latch);
+
+	return(ret);
+}
+
 /*************************************************************************
 Updates the search info of an index about hash successes. NOTE that info
 is NOT protected by any semaphore, to save CPU time! Do not assume its fields
@@ -1019,8 +1046,12 @@ next_rec:
 		ha_remove_all_nodes_to_page(table, folds[i], page);
 	}
 
+	ut_a(index->search_info->ref_count > 0);
+	index->search_info->ref_count--;
+
 	block->is_hashed = FALSE;
 	block->index = NULL;
 
 cleanup:
 	if (UNIV_UNLIKELY(block->n_pointers)) {
 		/* Corruption */
@@ -1241,6 +1272,15 @@ btr_search_build_page_hash_index(
 		goto exit_func;
 	}
 
+	/* This counter is decremented every time we drop page
+	hash index entries and is incremented here. Since we can
+	rebuild hash index for a page that is already hashed, we
+	have to take care not to increment the counter in that
+	case. */
+	if (!block->is_hashed) {
+		index->search_info->ref_count++;
+	}
+
 	block->is_hashed = TRUE;
 	block->n_hash_helps = 0;
...
@@ -1556,6 +1556,8 @@ dict_index_remove_from_cache(
 	dict_field_t*	field;
 	ulint		size;
 	ulint		i;
+	ulint		retries = 0;
+	btr_search_t*	info;
 
 	ut_ad(table && index);
 	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
@@ -1564,6 +1566,51 @@ dict_index_remove_from_cache(
 	ut_ad(mutex_own(&(dict_sys->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 
+	/* We always create search info whether or not the adaptive
+	hash index is enabled. */
+	info = index->search_info;
+	ut_ad(info);
+
+	/* We are not allowed to free the in-memory index struct
+	dict_index_t until all entries in the adaptive hash index
+	that point to any of the pages belonging to this b-tree index
+	are dropped. This is so because dropping these entries
+	requires access to the dict_index_t struct. To avoid such a
+	scenario we keep a count of the number of such pages in the
+	search_info and only free the dict_index_t struct when this
+	count drops to zero. */
+
+	for (;;) {
+		ulint	ref_count = btr_search_info_get_ref_count(info);
+
+		if (ref_count == 0) {
+			break;
+		}
+
+		/* Sleep for 10ms before trying again. */
+		os_thread_sleep(10000);
+		++retries;
+
+		if (retries % 500 == 0) {
+			/* No luck after 5 seconds of wait. */
+			fprintf(stderr, "InnoDB: Error: Waited for"
+					" %lu secs for hash index"
+					" ref_count (%lu) to drop"
+					" to 0.\n"
+					"index: \"%s\""
+					" table: \"%s\"\n",
+				retries/100,
+				ref_count,
+				index->name,
+				table->name);
+		}
+
+		/* To avoid a hang here we commit suicide if the
+		ref_count doesn't drop to zero in 600 seconds. */
+		if (retries >= 60000) {
+			ut_error;
+		}
+	}
+
 	ut_ad(UT_LIST_GET_LEN((index->tree)->tree_indexes) == 1);
 
 	dict_tree_free(index->tree);
...
@@ -40,6 +40,14 @@ btr_search_info_create(
 /*===================*/
 				/* out, own: search info struct */
 	mem_heap_t*	heap);	/* in: heap where created */
+/*********************************************************************
+Returns the value of ref_count. The value is protected by
+btr_search_latch. */
+ulint
+btr_search_info_get_ref_count(
+/*==========================*/
+			/* out: ref_count value. */
+	btr_search_t*	info);	/* in: search info. */
 /*************************************************************************
 Updates the search info. */
 UNIV_INLINE
@@ -144,6 +152,13 @@ btr_search_validate(void);
 
 struct btr_search_struct{
 	ulint	magic_n;	/* magic number */
+	ulint	ref_count;	/* Number of blocks in this index tree
+				that have a search index built,
+				i.e. block->index points to this index.
+				Protected by btr_search_latch except
+				during initialization in
+				btr_search_info_create(). */
+
 	/* The following 4 fields are currently not used: */
 	rec_t*	last_search;	/* pointer to the lower limit record of the
 				previous search; NULL if not known */
@@ -154,8 +169,10 @@ struct btr_search_struct{
 				or BTR_SEA_SAME_PAGE */
 	dulint	modify_clock;	/* value of modify clock at the time
 				last_search was stored */
-	/* The following 4 fields are not protected by any latch: */
+	/*----------------------*/
+	/* The following fields are not protected by any latch.
+	Unfortunately, this means that they must be aligned to
+	the machine word, i.e., they cannot be turned into bit-fields. */
 	page_t*	root_guess;	/* the root page frame when it was last time
 				fetched, or NULL */
 	ulint	hash_analysis;	/* when this exceeds a certain value, the
...
@@ -180,11 +180,11 @@ srv_parse_data_file_paths_and_sizes(
 			str++;
 		}
 
-		if (0 == memcmp(str, ":autoextend", (sizeof ":autoextend") - 1)) {
+		if (0 == strncmp(str, ":autoextend", (sizeof ":autoextend") - 1)) {
 
 			str += (sizeof ":autoextend") - 1;
 
-			if (0 == memcmp(str, ":max:", (sizeof ":max:") - 1)) {
+			if (0 == strncmp(str, ":max:", (sizeof ":max:") - 1)) {
 
 				str += (sizeof ":max:") - 1;
@@ -288,13 +288,13 @@ srv_parse_data_file_paths_and_sizes(
 		(*data_file_names)[i] = path;
 		(*data_file_sizes)[i] = size;
 
-		if (0 == memcmp(str, ":autoextend", (sizeof ":autoextend") - 1)) {
+		if (0 == strncmp(str, ":autoextend", (sizeof ":autoextend") - 1)) {
 
 			*is_auto_extending = TRUE;
 
 			str += (sizeof ":autoextend") - 1;
 
-			if (0 == memcmp(str, ":max:", (sizeof ":max:") - 1)) {
+			if (0 == strncmp(str, ":max:", (sizeof ":max:") - 1)) {
 
 				str += (sizeof ":max:") - 1;
...