Commit 7667e8f9 authored by Marko Mäkelä

Merge mysql-5.1 to mysql-5.5.

parents 9de021c7 41b97529
@@ -1895,7 +1895,7 @@ btr_cur_update_in_place(
 was_delete_marked = rec_get_deleted_flag(
 rec, page_is_comp(buf_block_get_frame(block)));
-is_hashed = block->is_hashed;
+is_hashed = (block->index != NULL);
 if (is_hashed) {
 /* TO DO: Can we skip this if none of the fields
......
@@ -44,12 +44,8 @@ Created 2/17/1996 Heikki Tuuri
 #include "ha0ha.h"
 /** Flag: has the search system been enabled?
-Protected by btr_search_latch and btr_search_enabled_mutex. */
+Protected by btr_search_latch. */
 UNIV_INTERN char btr_search_enabled = TRUE;
-UNIV_INTERN ibool btr_search_fully_disabled = FALSE;
-/** Mutex protecting btr_search_enabled */
-static mutex_t btr_search_enabled_mutex;
 #ifdef UNIV_PFS_MUTEX
 /* Key to register btr_search_enabled_mutex with performance schema */
@@ -180,8 +176,6 @@ btr_search_sys_create(
 rw_lock_create(btr_search_latch_key, &btr_search_latch,
 SYNC_SEARCH_SYS);
-mutex_create(btr_search_enabled_mutex_key,
-&btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);
 btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));
@@ -211,27 +205,37 @@ void
 btr_search_disable(void)
 /*====================*/
 {
-mutex_enter(&btr_search_enabled_mutex);
+dict_table_t* table;
+mutex_enter(&dict_sys->mutex);
 rw_lock_x_lock(&btr_search_latch);
-/* Disable access to hash index, also tell ha_insert_for_fold()
-stop adding new nodes to hash index, but still allow updating
-existing nodes */
 btr_search_enabled = FALSE;
-/* Clear all block->is_hashed flags and remove all entries
-from btr_search_sys->hash_index. */
-buf_pool_drop_hash_index();
-/* hash index has been cleaned up, disallow any operation to
-the hash index */
-btr_search_fully_disabled = TRUE;
-/* btr_search_enabled_mutex should guarantee this. */
-ut_ad(!btr_search_enabled);
+/* Clear the index->search_info->ref_count of every index in
+the data dictionary cache. */
+for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
+table = UT_LIST_GET_NEXT(table_LRU, table)) {
+dict_index_t* index;
+for (index = dict_table_get_first_index(table); index;
+index = dict_table_get_next_index(index)) {
+index->search_info->ref_count = 0;
+}
+}
+mutex_exit(&dict_sys->mutex);
+/* Set all block->index = NULL. */
+buf_pool_clear_hash_index();
+/* Clear the adaptive hash index. */
+hash_table_clear(btr_search_sys->hash_index);
+mem_heap_empty(btr_search_sys->hash_index->heap);
 rw_lock_x_unlock(&btr_search_latch);
-mutex_exit(&btr_search_enabled_mutex);
 }
 /********************************************************************//**
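For orientation: read top to bottom, the new btr_search_disable() now performs the whole cleanup in one pass while holding btr_search_latch in exclusive mode. Below is a condensed sketch of that sequence, not the committed code; the function name and the omission of debug assertions are mine.

static void
ahi_disable_sketch(void)
{
	dict_table_t*	table;

	mutex_enter(&dict_sys->mutex);
	rw_lock_x_lock(&btr_search_latch);

	/* 1. Stop all new adaptive hash index (AHI) activity. */
	btr_search_enabled = FALSE;

	/* 2. Reset the per-index count of hashed blocks. */
	for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
	     table = UT_LIST_GET_NEXT(table_LRU, table)) {
		dict_index_t*	index;

		for (index = dict_table_get_first_index(table); index;
		     index = dict_table_get_next_index(index)) {
			index->search_info->ref_count = 0;
		}
	}

	mutex_exit(&dict_sys->mutex);

	/* 3. Detach every buffer pool page from the AHI. */
	buf_pool_clear_hash_index();

	/* 4. Throw away the hash table contents and their heap memory. */
	hash_table_clear(btr_search_sys->hash_index);
	mem_heap_empty(btr_search_sys->hash_index->heap);

	rw_lock_x_unlock(&btr_search_latch);
}

Unlike the removed buf_pool_drop_hash_index() path, nothing here releases btr_search_latch in the middle of the scan, so a concurrent thread cannot re-hash pages while the AHI is being torn down.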
@@ -241,14 +245,11 @@ void
 btr_search_enable(void)
 /*====================*/
 {
-mutex_enter(&btr_search_enabled_mutex);
 rw_lock_x_lock(&btr_search_latch);
 btr_search_enabled = TRUE;
-btr_search_fully_disabled = FALSE;
 rw_lock_x_unlock(&btr_search_latch);
-mutex_exit(&btr_search_enabled_mutex);
 }
 /*****************************************************************//**
@@ -471,7 +472,7 @@ btr_search_update_block_hash_info(
 && (block->n_bytes == info->n_bytes)
 && (block->left_side == info->left_side)) {
-if ((block->is_hashed)
+if ((block->index)
 && (block->curr_n_fields == info->n_fields)
 && (block->curr_n_bytes == info->n_bytes)
 && (block->curr_left_side == info->left_side)) {
@@ -500,7 +501,7 @@ btr_search_update_block_hash_info(
 / BTR_SEARCH_PAGE_BUILD_LIMIT)
 && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
-if ((!block->is_hashed)
+if ((!block->index)
 || (block->n_hash_helps
 > 2 * page_get_n_recs(block->frame))
 || (block->n_fields != block->curr_n_fields)
@@ -532,9 +533,9 @@ btr_search_update_hash_ref(
 buf_block_t* block, /*!< in: buffer block where cursor positioned */
 btr_cur_t* cursor) /*!< in: cursor */
 {
+dict_index_t* index;
 ulint fold;
-rec_t* rec;
-index_id_t index_id;
+const rec_t* rec;
 ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
 #ifdef UNIV_SYNC_DEBUG
@@ -545,13 +546,15 @@ btr_search_update_hash_ref(
 ut_ad(page_align(btr_cur_get_rec(cursor))
 == buf_block_get_frame(block));
-if (!block->is_hashed) {
+index = block->index;
+if (!index) {
 return;
 }
-ut_a(block->index == cursor->index);
-ut_a(!dict_index_is_ibuf(cursor->index));
+ut_a(index == cursor->index);
+ut_a(!dict_index_is_ibuf(index));
 if ((info->n_hash_potential > 0)
 && (block->curr_n_fields == info->n_fields)
@@ -568,12 +571,11 @@ btr_search_update_hash_ref(
 return;
 }
-index_id = cursor->index->id;
 fold = rec_fold(rec,
-rec_get_offsets(rec, cursor->index, offsets_,
+rec_get_offsets(rec, index, offsets_,
 ULINT_UNDEFINED, &heap),
 block->curr_n_fields,
-block->curr_n_bytes, index_id);
+block->curr_n_bytes, index->id);
 if (UNIV_LIKELY_NULL(heap)) {
 mem_heap_free(heap);
 }
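The index_id locals disappear throughout this file because the fold value is now computed straight from index->id. As a worked example of the fold pattern that recurs below, here is a small sketch; the helper name fold_of_rec is only for illustration and is not part of the commit.

static ulint
fold_of_rec(const rec_t* rec, dict_index_t* index,
	    ulint n_fields, ulint n_bytes)
{
	mem_heap_t*	heap	= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint		fold;

	rec_offs_init(offsets_);

	fold = rec_fold(rec,
			rec_get_offsets(rec, index, offsets_,
					ULINT_UNDEFINED, &heap),
			n_fields, n_bytes,
			index->id);	/* previously a cached index_id */

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	return(fold);
}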
@@ -837,7 +839,7 @@ btr_search_guess_on_hash(
 {
 buf_pool_t* buf_pool;
 buf_block_t* block;
-rec_t* rec;
+const rec_t* rec;
 ulint fold;
 index_id_t index_id;
 #ifdef notdefined
@@ -923,7 +925,7 @@ btr_search_guess_on_hash(
 ut_ad(page_rec_is_user_rec(rec));
-btr_cur_position(index, rec, block, cursor);
+btr_cur_position(index, (rec_t*) rec, block, cursor);
 /* Check the validity of the guess within the page */
@@ -1053,15 +1055,16 @@ btr_search_drop_page_hash_index(
 retry:
 rw_lock_s_lock(&btr_search_latch);
-page = block->frame;
-if (UNIV_LIKELY(!block->is_hashed)) {
+index = block->index;
+if (UNIV_LIKELY(!index)) {
 rw_lock_s_unlock(&btr_search_latch);
 return;
 }
+ut_a(!dict_index_is_ibuf(index));
 table = btr_search_sys->hash_index;
 #ifdef UNIV_SYNC_DEBUG
@@ -1072,8 +1075,6 @@ retry:
 n_fields = block->curr_n_fields;
 n_bytes = block->curr_n_bytes;
-index = block->index;
-ut_a(!dict_index_is_ibuf(index));
 /* NOTE: The fields of block must not be accessed after
 releasing btr_search_latch, as the index page might only
@@ -1083,6 +1084,7 @@ retry:
 ut_a(n_fields + n_bytes > 0);
+page = block->frame;
 n_recs = page_get_n_recs(page);
 /* Calculate and cache fold values into an array for fast deletion
@@ -1131,7 +1133,7 @@ next_rec:
 rw_lock_x_lock(&btr_search_latch);
-if (UNIV_UNLIKELY(!block->is_hashed)) {
+if (UNIV_UNLIKELY(!block->index)) {
 /* Someone else has meanwhile dropped the hash index */
 goto cleanup;
@@ -1159,7 +1161,6 @@ next_rec:
 ut_a(index->search_info->ref_count > 0);
 index->search_info->ref_count--;
-block->is_hashed = FALSE;
 block->index = NULL;
 cleanup:
@@ -1187,8 +1188,8 @@ cleanup:
 }
 /********************************************************************//**
-Drops a page hash index when a page is freed from a fseg to the file system.
-Drops possible hash index if the page happens to be in the buffer pool. */
+Drops a possible page hash index when a page is evicted from the buffer pool
+or freed in a file segment. */
 UNIV_INTERN
 void
 btr_search_drop_page_hash_when_freed(
@@ -1201,28 +1202,19 @@ btr_search_drop_page_hash_when_freed(
 buf_block_t* block;
 mtr_t mtr;
-if (!buf_page_peek_if_search_hashed(space, page_no)) {
-return;
-}
 mtr_start(&mtr);
-/* We assume that if the caller has a latch on the page, then the
-caller has already dropped the hash index for the page, and we never
-get here. Therefore we can acquire the s-latch to the page without
-having to fear a deadlock. */
-block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
+/* If the caller has a latch on the page, then the caller must
+have a x-latch on the page and it must have already dropped
+the hash index for the page. Because of the x-latch that we
+are possibly holding, we cannot s-latch the page, but must
+(recursively) x-latch it, even though we are only reading. */
+block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL,
 BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
 &mtr);
-/* Because the buffer pool mutex was released by
-buf_page_peek_if_search_hashed(), it is possible that the
-block was removed from the buffer pool by another thread
-before buf_page_get_gen() got a chance to acquire the buffer
-pool mutex again. Thus, we must check for a NULL return. */
-if (UNIV_LIKELY(block != NULL)) {
+if (block && block->index) {
 buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
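The comment above carries the key reasoning of this hunk: instead of peeking first and then s-latching, the page is now x-latched (recursively, in case the caller already holds an x-latch on it) and only looked up in the pool, never read from disk. A condensed sketch of the resulting sequence follows; the wrapper name is mine, and the trailing drop call and mtr_commit() are my reconstruction of how the function completes, since they fall outside the hunk.

static void
drop_when_freed_sketch(ulint space, ulint zip_size, ulint page_no)
{
	buf_block_t*	block;
	mtr_t		mtr;

	mtr_start(&mtr);

	/* BUF_PEEK_IF_IN_POOL: return NULL rather than doing I/O if the
	page is not resident.  RW_X_LATCH: an s-latch could deadlock
	against a caller that already x-latches this page, while an
	x-latch can be taken recursively by the same thread. */
	block = buf_page_get_gen(space, zip_size, page_no, RW_X_LATCH, NULL,
				 BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
				 &mtr);

	if (block && block->index) {
		/* The page is resident and still hashed: drop its
		adaptive hash index entries. */
		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
		btr_search_drop_page_hash_index(block);
	}

	mtr_commit(&mtr);
}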
@@ -1254,7 +1246,6 @@ btr_search_build_page_hash_index(
 rec_t* next_rec;
 ulint fold;
 ulint next_fold;
-index_id_t index_id;
 ulint n_cached;
 ulint n_recs;
 ulint* folds;
@@ -1268,9 +1259,6 @@ btr_search_build_page_hash_index(
 ut_ad(index);
 ut_a(!dict_index_is_ibuf(index));
-table = btr_search_sys->hash_index;
-page = buf_block_get_frame(block);
 #ifdef UNIV_SYNC_DEBUG
 ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
 ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
@@ -1279,7 +1267,15 @@ btr_search_build_page_hash_index(
 rw_lock_s_lock(&btr_search_latch);
-if (block->is_hashed && ((block->curr_n_fields != n_fields)
+if (!btr_search_enabled) {
+rw_lock_s_unlock(&btr_search_latch);
+return;
+}
+table = btr_search_sys->hash_index;
+page = buf_block_get_frame(block);
+if (block->index && ((block->curr_n_fields != n_fields)
 || (block->curr_n_bytes != n_bytes)
 || (block->curr_left_side != left_side))) {
@@ -1318,7 +1314,7 @@ btr_search_build_page_hash_index(
 n_cached = 0;
-index_id = btr_page_get_index_id(page);
+ut_a(index->id == btr_page_get_index_id(page));
 rec = page_rec_get_next(page_get_infimum_rec(page));
@@ -1333,7 +1329,7 @@ btr_search_build_page_hash_index(
 }
 }
-fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
+fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
 if (left_side) {
@@ -1360,7 +1356,7 @@ btr_search_build_page_hash_index(
 offsets = rec_get_offsets(next_rec, index, offsets,
 n_fields + (n_bytes > 0), &heap);
 next_fold = rec_fold(next_rec, offsets, n_fields,
-n_bytes, index_id);
+n_bytes, index->id);
 if (fold != next_fold) {
 /* Insert an entry into the hash index */
@@ -1385,11 +1381,11 @@ btr_search_build_page_hash_index(
 rw_lock_x_lock(&btr_search_latch);
-if (UNIV_UNLIKELY(btr_search_fully_disabled)) {
+if (UNIV_UNLIKELY(!btr_search_enabled)) {
 goto exit_func;
 }
-if (block->is_hashed && ((block->curr_n_fields != n_fields)
+if (block->index && ((block->curr_n_fields != n_fields)
 || (block->curr_n_bytes != n_bytes)
 || (block->curr_left_side != left_side))) {
 goto exit_func;
@@ -1400,11 +1396,10 @@ btr_search_build_page_hash_index(
 rebuild hash index for a page that is already hashed, we
 have to take care not to increment the counter in that
 case. */
-if (!block->is_hashed) {
+if (!block->index) {
 index->search_info->ref_count++;
 }
-block->is_hashed = TRUE;
 block->n_hash_helps = 0;
 block->curr_n_fields = n_fields;
@@ -1452,14 +1447,15 @@ btr_search_move_or_delete_hash_entries(
 ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-ut_a(!new_block->is_hashed || new_block->index == index);
-ut_a(!block->is_hashed || block->index == index);
-ut_a(!(new_block->is_hashed || block->is_hashed)
-|| !dict_index_is_ibuf(index));
 rw_lock_s_lock(&btr_search_latch);
-if (new_block->is_hashed) {
+ut_a(!new_block->index || new_block->index == index);
+ut_a(!block->index || block->index == index);
+ut_a(!(new_block->index || block->index)
+|| !dict_index_is_ibuf(index));
+if (new_block->index) {
 rw_lock_s_unlock(&btr_search_latch);
@@ -1468,7 +1464,7 @@ btr_search_move_or_delete_hash_entries(
 return;
 }
-if (block->is_hashed) {
+if (block->index) {
 n_fields = block->curr_n_fields;
 n_bytes = block->curr_n_bytes;
@@ -1505,42 +1501,48 @@ btr_search_update_hash_on_delete(
 {
 hash_table_t* table;
 buf_block_t* block;
-rec_t* rec;
+const rec_t* rec;
 ulint fold;
-index_id_t index_id;
+dict_index_t* index;
 ulint offsets_[REC_OFFS_NORMAL_SIZE];
 mem_heap_t* heap = NULL;
 rec_offs_init(offsets_);
-rec = btr_cur_get_rec(cursor);
 block = btr_cur_get_block(cursor);
 #ifdef UNIV_SYNC_DEBUG
 ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-if (!block->is_hashed) {
+index = block->index;
+if (!index) {
 return;
 }
-ut_a(block->index == cursor->index);
+ut_a(index == cursor->index);
 ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
-ut_a(!dict_index_is_ibuf(cursor->index));
+ut_a(!dict_index_is_ibuf(index));
 table = btr_search_sys->hash_index;
-index_id = cursor->index->id;
-fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
+rec = btr_cur_get_rec(cursor);
+fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_,
 ULINT_UNDEFINED, &heap),
-block->curr_n_fields, block->curr_n_bytes, index_id);
+block->curr_n_fields, block->curr_n_bytes, index->id);
 if (UNIV_LIKELY_NULL(heap)) {
 mem_heap_free(heap);
 }
 rw_lock_x_lock(&btr_search_latch);
+if (block->index) {
+ut_a(block->index == index);
 ha_search_and_delete_if_found(table, fold, rec);
+}
 rw_lock_x_unlock(&btr_search_latch);
 }
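All of the update paths above now follow the same discipline: block->index is read without any AHI latch as a cheap filter, and then re-checked after the x-latch on btr_search_latch has been taken, because btr_search_disable() or buf_pool_clear_hash_index() may reset the pointer at any moment. A minimal sketch of that pattern; the function name and the elided body are mine, not part of the commit.

static void
ahi_update_pattern_sketch(btr_cur_t* cursor)
{
	buf_block_t*	block	= btr_cur_get_block(cursor);
	dict_index_t*	index	= block->index;	/* unlatched read */

	if (!index) {
		return;		/* page not hashed; nothing to do */
	}

	ut_a(index == cursor->index);

	rw_lock_x_lock(&btr_search_latch);

	if (block->index) {
		/* Still hashed: it must still be the same index,
		because only the AHI code assigns block->index. */
		ut_a(block->index == index);

		/* ... modify btr_search_sys->hash_index here ... */
	}

	rw_lock_x_unlock(&btr_search_latch);
}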
@@ -1558,6 +1560,7 @@ btr_search_update_hash_node_on_insert(
 {
 hash_table_t* table;
 buf_block_t* block;
+dict_index_t* index;
 rec_t* rec;
 rec = btr_cur_get_rec(cursor);
@@ -1568,16 +1571,25 @@ btr_search_update_hash_node_on_insert(
 ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-if (!block->is_hashed) {
+index = block->index;
+if (!index) {
 return;
 }
-ut_a(block->index == cursor->index);
-ut_a(!dict_index_is_ibuf(cursor->index));
+ut_a(cursor->index == index);
+ut_a(!dict_index_is_ibuf(index));
 rw_lock_x_lock(&btr_search_latch);
+if (!block->index) {
+goto func_exit;
+}
+ut_a(block->index == index);
 if ((cursor->flag == BTR_CUR_HASH)
 && (cursor->n_fields == block->curr_n_fields)
 && (cursor->n_bytes == block->curr_n_bytes)
@@ -1588,6 +1600,7 @@ btr_search_update_hash_node_on_insert(
 ha_search_and_update_if_found(table, cursor->fold, rec,
 block, page_rec_get_next(rec));
+func_exit:
 rw_lock_x_unlock(&btr_search_latch);
 } else {
 rw_lock_x_unlock(&btr_search_latch);
@@ -1609,10 +1622,10 @@ btr_search_update_hash_on_insert(
 {
 hash_table_t* table;
 buf_block_t* block;
+dict_index_t* index;
 rec_t* rec;
 rec_t* ins_rec;
 rec_t* next_rec;
-index_id_t index_id;
 ulint fold;
 ulint ins_fold;
 ulint next_fold = 0; /* remove warning (??? bug ???) */
@@ -1637,15 +1650,15 @@ btr_search_update_hash_on_insert(
 ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-if (!block->is_hashed) {
+index = block->index;
+if (!index) {
 return;
 }
-ut_a(block->index == cursor->index);
-ut_a(!dict_index_is_ibuf(cursor->index));
-index_id = cursor->index->id;
+ut_a(index == cursor->index);
+ut_a(!dict_index_is_ibuf(index));
 n_fields = block->curr_n_fields;
 n_bytes = block->curr_n_bytes;
@@ -1654,21 +1667,21 @@ btr_search_update_hash_on_insert(
 ins_rec = page_rec_get_next(rec);
 next_rec = page_rec_get_next(ins_rec);
-offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
+offsets = rec_get_offsets(ins_rec, index, offsets,
 ULINT_UNDEFINED, &heap);
-ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);
+ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id);
 if (!page_rec_is_supremum(next_rec)) {
-offsets = rec_get_offsets(next_rec, cursor->index, offsets,
+offsets = rec_get_offsets(next_rec, index, offsets,
 n_fields + (n_bytes > 0), &heap);
 next_fold = rec_fold(next_rec, offsets, n_fields,
-n_bytes, index_id);
+n_bytes, index->id);
 }
 if (!page_rec_is_infimum(rec)) {
-offsets = rec_get_offsets(rec, cursor->index, offsets,
+offsets = rec_get_offsets(rec, index, offsets,
 n_fields + (n_bytes > 0), &heap);
-fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
+fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id);
 } else {
 if (left_side) {
@@ -1676,6 +1689,10 @@ btr_search_update_hash_on_insert(
 locked = TRUE;
+if (!btr_search_enabled) {
+goto function_exit;
+}
 ha_insert_for_fold(table, ins_fold, block, ins_rec);
 }
@@ -1689,6 +1706,10 @@ btr_search_update_hash_on_insert(
 rw_lock_x_lock(&btr_search_latch);
 locked = TRUE;
+if (!btr_search_enabled) {
+goto function_exit;
+}
 }
 if (!left_side) {
@@ -1707,6 +1728,10 @@ check_next_rec:
 rw_lock_x_lock(&btr_search_latch);
 locked = TRUE;
+if (!btr_search_enabled) {
+goto function_exit;
+}
 }
 ha_insert_for_fold(table, ins_fold, block, ins_rec);
@@ -1722,6 +1747,10 @@ check_next_rec:
 rw_lock_x_lock(&btr_search_latch);
 locked = TRUE;
+if (!btr_search_enabled) {
+goto function_exit;
+}
 }
 if (!left_side) {
@@ -1729,7 +1758,7 @@ check_next_rec:
 ha_insert_for_fold(table, ins_fold, block, ins_rec);
 /*
 fputs("Hash insert for ", stderr);
-dict_index_name_print(stderr, cursor->index);
+dict_index_name_print(stderr, index);
 fprintf(stderr, " fold %lu\n", ins_fold);
 */
 } else {
@@ -1832,21 +1861,20 @@ btr_search_validate(void)
 ut_a(!dict_index_is_ibuf(block->index));
-offsets = rec_get_offsets((const rec_t*) node->data,
+page_index_id = btr_page_get_index_id(block->frame);
+offsets = rec_get_offsets(node->data,
 block->index, offsets,
 block->curr_n_fields
 + (block->curr_n_bytes > 0),
 &heap);
-page_index_id = btr_page_get_index_id(block->frame);
-if (UNIV_UNLIKELY
-(!block->is_hashed || node->fold
-!= rec_fold((rec_t*)(node->data),
+if (!block->index || node->fold
+!= rec_fold(node->data,
 offsets,
 block->curr_n_fields,
 block->curr_n_bytes,
-page_index_id))) {
+page_index_id)) {
 const page_t* page = block->frame;
 ok = FALSE;
@@ -1862,20 +1890,19 @@ btr_search_validate(void)
 node->data,
 (ullint) page_index_id,
 (ulong) node->fold,
-(ulong) rec_fold((rec_t*)(node->data),
+(ulong) rec_fold(node->data,
 offsets,
 block->curr_n_fields,
 block->curr_n_bytes,
 page_index_id));
 fputs("InnoDB: Record ", stderr);
-rec_print_new(stderr, (rec_t*)node->data,
-offsets);
+rec_print_new(stderr, node->data, offsets);
 fprintf(stderr, "\nInnoDB: on that page."
-" Page mem address %p, is hashed %lu,"
+" Page mem address %p, is hashed %p,"
 " n fields %lu, n bytes %lu\n"
 "InnoDB: side %lu\n",
-(void*) page, (ulong) block->is_hashed,
+(void*) page, (void*) block->index,
 (ulong) block->curr_n_fields,
 (ulong) block->curr_n_bytes,
 (ulong) block->curr_left_side);
......
@@ -873,8 +873,6 @@ buf_block_init(
 block->check_index_page_at_flush = FALSE;
 block->index = NULL;
-block->is_hashed = FALSE;
 #ifdef UNIV_DEBUG
 block->page.in_page_hash = FALSE;
 block->page.in_zip_hash = FALSE;
@@ -1279,108 +1277,47 @@ buf_pool_free(
 }
 /********************************************************************//**
-Drops adaptive hash index for a buffer pool instance. */
-static
+Clears the adaptive hash index on all pages in the buffer pool. */
+UNIV_INTERN
 void
-buf_pool_drop_hash_index_instance(
-/*==============================*/
-buf_pool_t* buf_pool, /*!< in: buffer pool instance */
-ibool* released_search_latch) /*!< out: flag for signalling
-whether the search latch was
-released */
+buf_pool_clear_hash_index(void)
+/*===========================*/
 {
+ulint p;
+#ifdef UNIV_SYNC_DEBUG
+ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ut_ad(!btr_search_enabled);
+for (p = 0; p < srv_buf_pool_instances; p++) {
+buf_pool_t* buf_pool = buf_pool_from_array(p);
 buf_chunk_t* chunks = buf_pool->chunks;
 buf_chunk_t* chunk = chunks + buf_pool->n_chunks;
 while (--chunk >= chunks) {
-ulint i;
 buf_block_t* block = chunk->blocks;
-for (i = chunk->size; i--; block++) {
-/* block->is_hashed cannot be modified
+ulint i = chunk->size;
+for (; i--; block++) {
+dict_index_t* index = block->index;
+/* We can set block->index = NULL
 when we have an x-latch on btr_search_latch;
 see the comment in buf0buf.h */
-if (!block->is_hashed) {
+if (!index) {
+/* Not hashed */
 continue;
 }
-/* To follow the latching order, we
-have to release btr_search_latch
-before acquiring block->latch. */
-rw_lock_x_unlock(&btr_search_latch);
-/* When we release the search latch,
-we must rescan all blocks, because
-some may become hashed again. */
-*released_search_latch = TRUE;
-rw_lock_x_lock(&block->lock);
-/* This should be guaranteed by the
-callers, which will be holding
-btr_search_enabled_mutex. */
-ut_ad(!btr_search_enabled);
-/* Because we did not buffer-fix the
-block by calling buf_block_get_gen(),
-it is possible that the block has been
-allocated for some other use after
-btr_search_latch was released above.
-We do not care which file page the
-block is mapped to. All we want to do
-is to drop any hash entries referring
-to the page. */
-/* It is possible that
-block->page.state != BUF_FILE_PAGE.
-Even that does not matter, because
-btr_search_drop_page_hash_index() will
-check block->is_hashed before doing
-anything. block->is_hashed can only
-be set on uncompressed file pages. */
-btr_search_drop_page_hash_index(block);
-rw_lock_x_unlock(&block->lock);
-rw_lock_x_lock(&btr_search_latch);
-ut_ad(!btr_search_enabled);
+block->index = NULL;
+# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+block->n_pointers = 0;
+# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
 }
 }
-}
-/********************************************************************//**
-Drops the adaptive hash index. To prevent a livelock, this function
-is only to be called while holding btr_search_latch and while
-btr_search_enabled == FALSE. */
-UNIV_INTERN
-void
-buf_pool_drop_hash_index(void)
-/*==========================*/
-{
-ibool released_search_latch;
-#ifdef UNIV_SYNC_DEBUG
-ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
-#endif /* UNIV_SYNC_DEBUG */
-ut_ad(!btr_search_enabled);
-do {
-ulint i;
-released_search_latch = FALSE;
-for (i = 0; i < srv_buf_pool_instances; i++) {
-buf_pool_t* buf_pool;
-buf_pool = buf_pool_from_array(i);
-buf_pool_drop_hash_index_instance(
-buf_pool, &released_search_latch);
 }
-} while (released_search_latch);
 }
 /********************************************************************//**
@@ -1740,38 +1677,6 @@ buf_reset_check_index_page_at_flush(
 buf_pool_mutex_exit(buf_pool);
 }
-/********************************************************************//**
-Returns the current state of is_hashed of a page. FALSE if the page is
-not in the pool. NOTE that this operation does not fix the page in the
-pool if it is found there.
-@return TRUE if page hash index is built in search system */
-UNIV_INTERN
-ibool
-buf_page_peek_if_search_hashed(
-/*===========================*/
-ulint space, /*!< in: space id */
-ulint offset) /*!< in: page number */
-{
-buf_block_t* block;
-ibool is_hashed;
-buf_pool_t* buf_pool = buf_pool_get(space, offset);
-buf_pool_mutex_enter(buf_pool);
-block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);
-if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
-is_hashed = FALSE;
-} else {
-ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
-is_hashed = block->is_hashed;
-}
-buf_pool_mutex_exit(buf_pool);
-return(is_hashed);
-}
 #if defined UNIV_DEBUG_FILE_ACCESSES || defined UNIV_DEBUG
 /********************************************************************//**
 Sets file_page_was_freed TRUE if the page is found in the buffer pool.
@@ -1981,7 +1886,6 @@ buf_block_init_low(
 block->index = NULL;
 block->n_hash_helps = 0;
-block->is_hashed = FALSE;
 block->n_fields = 1;
 block->n_bytes = 0;
 block->left_side = TRUE;
......
@@ -273,7 +273,7 @@ next_page:
 mutex_enter(&((buf_block_t*) bpage)->mutex);
 is_fixed = bpage->buf_fix_count > 0
-|| !((buf_block_t*) bpage)->is_hashed;
+|| !((buf_block_t*) bpage)->index;
 mutex_exit(&((buf_block_t*) bpage)->mutex);
 if (is_fixed) {
@@ -405,7 +405,7 @@ scan_again:
 if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
 /* This is a compressed-only block
 descriptor. Do nothing. */
-} else if (((buf_block_t*) bpage)->is_hashed) {
+} else if (((buf_block_t*) bpage)->index) {
 ulint page_no;
 ulint zip_size;
@@ -417,7 +417,7 @@ scan_again:
 mutex_exit(block_mutex);
 /* Note that the following call will acquire
-an S-latch on the page */
+and release an X-latch on the page. */
 btr_search_drop_page_hash_when_freed(
 id, zip_size, page_no);
......
 /*****************************************************************************
-Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -88,40 +88,6 @@ ha_create_func(
 return(table);
 }
-/*************************************************************//**
-Empties a hash table and frees the memory heaps. */
-UNIV_INTERN
-void
-ha_clear(
-/*=====*/
-hash_table_t* table) /*!< in, own: hash table */
-{
-ulint i;
-ulint n;
-ut_ad(table);
-ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-#ifdef UNIV_SYNC_DEBUG
-ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE));
-#endif /* UNIV_SYNC_DEBUG */
-#ifndef UNIV_HOTBACKUP
-/* Free the memory heaps. */
-n = table->n_mutexes;
-for (i = 0; i < n; i++) {
-mem_heap_free(table->heaps[i]);
-}
-#endif /* !UNIV_HOTBACKUP */
-/* Clear the hash table. */
-n = hash_get_n_cells(table);
-for (i = 0; i < n; i++) {
-hash_get_nth_cell(table, i)->node = NULL;
-}
-}
 /*************************************************************//**
 Inserts an entry into a hash table. If an entry with the same fold number
 is found, its node is updated to point to the new data, and no new node
@@ -140,7 +106,7 @@ ha_insert_for_fold_func(
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 buf_block_t* block, /*!< in: buffer block containing the data */
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-void* data) /*!< in: data, must not be NULL */
+const rec_t* data) /*!< in: data, must not be NULL */
 {
 hash_cell_t* cell;
 ha_node_t* node;
@@ -153,7 +119,11 @@ ha_insert_for_fold_func(
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 ut_a(block->frame == page_align(data));
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+#ifdef UNIV_SYNC_DEBUG
+ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
 ASSERT_HASH_MUTEX_OWN(table, fold);
+ut_ad(btr_search_enabled);
 hash = hash_calc_hash(fold, table);
@@ -173,7 +143,6 @@ ha_insert_for_fold_func(
 prev_block->n_pointers--;
 block->n_pointers++;
 }
-ut_ad(!btr_search_fully_disabled);
 # endif /* !UNIV_HOTBACKUP */
 prev_node->block = block;
@@ -186,13 +155,6 @@ ha_insert_for_fold_func(
 prev_node = prev_node->next;
 }
-/* We are in the process of disabling hash index, do not add
-new chain node */
-if (!btr_search_enabled) {
-ut_ad(!btr_search_fully_disabled);
-return(TRUE);
-}
 /* We have to allocate a new chain node */
 node = mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t));
@@ -250,6 +212,10 @@ ha_delete_hash_node(
 {
 ut_ad(table);
 ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
+#ifdef UNIV_SYNC_DEBUG
+ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ut_ad(btr_search_enabled);
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 # ifndef UNIV_HOTBACKUP
 if (table->adaptive) {
@@ -272,11 +238,11 @@ ha_search_and_update_if_found_func(
 /*===============================*/
 hash_table_t* table, /*!< in/out: hash table */
 ulint fold, /*!< in: folded value of the searched data */
-void* data, /*!< in: pointer to the data */
+const rec_t* data, /*!< in: pointer to the data */
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 buf_block_t* new_block,/*!< in: block containing new_data */
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-void* new_data)/*!< in: new pointer to the data */
+const rec_t* new_data)/*!< in: new pointer to the data */
 {
 ha_node_t* node;
@@ -286,6 +252,13 @@ ha_search_and_update_if_found_func(
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 ut_a(new_block->frame == page_align(new_data));
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+#ifdef UNIV_SYNC_DEBUG
+ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+if (!btr_search_enabled) {
+return;
+}
 node = ha_search_with_data(table, fold, data);
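The ha_* hunks share one idea: every writer of the hash table must hold btr_search_latch in exclusive mode, and a call that races with disabling the AHI simply becomes a no-op. A sketch of that guard, assuming a caller that has already computed the fold value; the wrapper name is illustrative only.

static void
guarded_ahi_write_sketch(hash_table_t* table, ulint fold)
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!btr_search_enabled) {
		/* btr_search_disable() has emptied, or is about to empty,
		the table; the caller's fold value may be stale. */
		return;
	}

	/* ... ha_insert_for_fold(), ha_delete_hash_node(), or
	ha_search_and_update_if_found() would be called here ... */
	(void) table;
	(void) fold;
}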
@@ -322,6 +295,10 @@ ha_remove_all_nodes_to_page(
 ut_ad(table);
 ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 ASSERT_HASH_MUTEX_OWN(table, fold);
+#ifdef UNIV_SYNC_DEBUG
+ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ut_ad(btr_search_enabled);
 node = ha_chain_get_first(table, fold);
......
@@ -2583,7 +2583,6 @@ innobase_change_buffering_inited_ok:
 /* Get the current high water mark format. */
 innobase_file_format_max = (char*) trx_sys_file_format_max_get();
-btr_search_fully_disabled = (!btr_search_enabled);
 DBUG_RETURN(FALSE);
 error:
 DBUG_RETURN(TRUE);
......
@@ -3955,7 +3955,7 @@ ibuf_insert_to_index_page(
 ut_ad(ibuf_inside(mtr));
 ut_ad(dtuple_check_typed(entry));
-ut_ad(!buf_block_align(page)->is_hashed);
+ut_ad(!buf_block_align(page)->index);
 if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
 != (ibool)!!page_is_comp(page))) {
......
 /*****************************************************************************
-Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -141,8 +141,8 @@ btr_search_drop_page_hash_index(
 for which we know that
 block->buf_fix_count == 0 */
 /********************************************************************//**
-Drops a page hash index when a page is freed from a fseg to the file system.
-Drops possible hash index if the page happens to be in the buffer pool. */
+Drops a possible page hash index when a page is evicted from the buffer pool
+or freed in a file segment. */
 UNIV_INTERN
 void
 btr_search_drop_page_hash_when_freed(
@@ -192,16 +192,6 @@ btr_search_validate(void);
 # define btr_search_validate() TRUE
 #endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
-/** Flag: has the search system been enabled?
-Protected by btr_search_latch and btr_search_enabled_mutex. */
-extern char btr_search_enabled;
-/** Flag: whether the search system has completed its disabling process,
-It is set to TRUE right after buf_pool_drop_hash_index() in
-btr_search_disable(), indicating hash index entries are cleaned up.
-Protected by btr_search_latch and btr_search_enabled_mutex. */
-extern ibool btr_search_fully_disabled;
 /** The search info struct in an index */
 struct btr_search_struct{
 ulint ref_count; /*!< Number of blocks in this index tree
@@ -270,24 +260,6 @@ struct btr_search_sys_struct{
 /** The adaptive hash index */
 extern btr_search_sys_t* btr_search_sys;
-/** @brief The latch protecting the adaptive search system
-This latch protects the
-(1) hash index;
-(2) columns of a record to which we have a pointer in the hash index;
-but does NOT protect:
-(3) next record offset field in a record;
-(4) next or previous records on the same page.
-Bear in mind (3) and (4) when using the hash index.
-*/
-extern rw_lock_t* btr_search_latch_temp;
-/** The latch protecting the adaptive search system */
-#define btr_search_latch (*btr_search_latch_temp)
 #ifdef UNIV_SEARCH_PERF_STAT
 /** Number of successful adaptive hash index lookups */
 extern ulint btr_search_n_succ;
......
 /*****************************************************************************
-Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -30,6 +30,7 @@ Created 2/17/1996 Heikki Tuuri
 #include "rem0types.h"
 #include "page0types.h"
+#include "sync0rw.h"
 /** Persistent cursor */
 typedef struct btr_pcur_struct btr_pcur_t;
@@ -38,6 +39,28 @@ typedef struct btr_cur_struct btr_cur_t;
 /** B-tree search information for the adaptive hash index */
 typedef struct btr_search_struct btr_search_t;
+/** @brief The latch protecting the adaptive search system
+This latch protects the
+(1) hash index;
+(2) columns of a record to which we have a pointer in the hash index;
+but does NOT protect:
+(3) next record offset field in a record;
+(4) next or previous records on the same page.
+Bear in mind (3) and (4) when using the hash index.
+*/
+extern rw_lock_t* btr_search_latch_temp;
+/** The latch protecting the adaptive search system */
+#define btr_search_latch (*btr_search_latch_temp)
+/** Flag: has the search system been enabled?
+Protected by btr_search_latch. */
+extern char btr_search_enabled;
 #ifdef UNIV_BLOB_DEBUG
 # include "buf0types.h"
 /** An index->blobs entry for keeping track of off-page column references */
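Moving this comment into btr0types.h does not change its meaning: holding btr_search_latch lets a reader trust a hashed record's column values, but not the page structure around the record. A small illustration of the rule, with the unsafe part described only in comments; the names here are mine, not from the commit.

static void
ahi_latch_rules_sketch(const rec_t* rec)
{
	rw_lock_s_lock(&btr_search_latch);

	/* Covered by the latch (items 1 and 2 above): the hash index
	itself and the indexed column values that rec points to. */
	/* ... compare the search tuple against rec ... */

	rw_lock_s_unlock(&btr_search_latch);

	/* NOT covered (items 3 and 4): the next-record offset stored in
	rec and the neighbouring records on the page.  Following them
	requires a latch on the page itself, e.g. one obtained through
	buf_page_get_gen() in a mini-transaction. */
	(void) rec;
}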
......
@@ -229,13 +229,11 @@ buf_pool_free(
 ulint n_instances); /*!< in: numbere of instances to free */
 /********************************************************************//**
-Drops the adaptive hash index. To prevent a livelock, this function
-is only to be called while holding btr_search_latch and while
-btr_search_enabled == FALSE. */
+Clears the adaptive hash index on all pages in the buffer pool. */
 UNIV_INTERN
 void
-buf_pool_drop_hash_index(void);
-/*==========================*/
+buf_pool_clear_hash_index(void);
+/*===========================*/
 /********************************************************************//**
 Relocate a buffer control block. Relocates the block on the LRU list
@@ -568,17 +566,6 @@ buf_page_peek_if_too_old(
 /*=====================*/
 const buf_page_t* bpage); /*!< in: block to make younger */
 /********************************************************************//**
-Returns the current state of is_hashed of a page. FALSE if the page is
-not in the pool. NOTE that this operation does not fix the page in the
-pool if it is found there.
-@return TRUE if page hash index is built in search system */
-UNIV_INTERN
-ibool
-buf_page_peek_if_search_hashed(
-/*===========================*/
-ulint space, /*!< in: space id */
-ulint offset);/*!< in: page number */
-/********************************************************************//**
 Gets the youngest modification log sequence number for a frame.
 Returns zero if not file page or no modification occurred yet.
 @return newest modification to page */
@@ -1526,13 +1513,16 @@ struct buf_block_struct{
 /* @} */
 /** @name Hash search fields
-These 6 fields may only be modified when we have
+These 5 fields may only be modified when we have
 an x-latch on btr_search_latch AND
 - we are holding an s-latch or x-latch on buf_block_struct::lock or
 - we know that buf_block_struct::buf_fix_count == 0.
 An exception to this is when we init or create a page
-in the buffer pool in buf0buf.c. */
+in the buffer pool in buf0buf.c.
+Another exception is that assigning block->index = NULL
+is allowed whenever holding an x-latch on btr_search_latch. */
 /* @{ */
@@ -1541,20 +1531,20 @@ struct buf_block_struct{
 pointers in the adaptive hash index
 pointing to this frame */
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-unsigned is_hashed:1; /*!< TRUE if hash index has
-already been built on this
-page; note that it does not
-guarantee that the index is
-complete, though: there may
-have been hash collisions,
-record deletions, etc. */
 unsigned curr_n_fields:10;/*!< prefix length for hash indexing:
 number of full fields */
 unsigned curr_n_bytes:15;/*!< number of bytes in hash
 indexing */
 unsigned curr_left_side:1;/*!< TRUE or FALSE in hash indexing */
-dict_index_t* index; /*!< Index for which the adaptive
-hash index has been created. */
+dict_index_t* index; /*!< Index for which the
+adaptive hash index has been
+created, or NULL if the page
+does not exist in the
+index. Note that it does not
+guarantee that the index is
+complete, though: there may
+have been hash collisions,
+record deletions, etc. */
 /* @} */
 # ifdef UNIV_SYNC_DEBUG
 /** @name Debug fields */
......
 /*****************************************************************************
-Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved.
+Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -31,13 +31,14 @@ Created 8/18/1994 Heikki Tuuri
 #include "hash0hash.h"
 #include "page0types.h"
 #include "buf0types.h"
+#include "rem0types.h"
 /*************************************************************//**
 Looks for an element in a hash table.
 @return pointer to the data of the first hash table node in chain
 having the fold number, NULL if not found */
 UNIV_INLINE
-void*
+const rec_t*
 ha_search_and_get_data(
 /*===================*/
 hash_table_t* table, /*!< in: hash table */
@@ -51,11 +52,11 @@ ha_search_and_update_if_found_func(
 /*===============================*/
 hash_table_t* table, /*!< in/out: hash table */
 ulint fold, /*!< in: folded value of the searched data */
-void* data, /*!< in: pointer to the data */
+const rec_t* data, /*!< in: pointer to the data */
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 buf_block_t* new_block,/*!< in: block containing new_data */
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-void* new_data);/*!< in: new pointer to the data */
+const rec_t* new_data);/*!< in: new pointer to the data */
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 /** Looks for an element when we know the pointer to the data and
@@ -113,14 +114,6 @@ chosen to be a slightly bigger prime number.
 # define ha_create(n_c,n_m,level) ha_create_func(n_c,n_m)
 #endif /* UNIV_SYNC_DEBUG */
-/*************************************************************//**
-Empties a hash table and frees the memory heaps. */
-UNIV_INTERN
-void
-ha_clear(
-/*=====*/
-hash_table_t* table); /*!< in, own: hash table */
 /*************************************************************//**
 Inserts an entry into a hash table. If an entry with the same fold number
 is found, its node is updated to point to the new data, and no new node
@@ -138,7 +131,7 @@ ha_insert_for_fold_func(
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 buf_block_t* block, /*!< in: buffer block containing the data */
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-void* data); /*!< in: data, must not be NULL */
+const rec_t* data); /*!< in: data, must not be NULL */
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 /**
@@ -174,7 +167,7 @@ ha_search_and_delete_if_found(
 /*==========================*/
 hash_table_t* table, /*!< in: hash table */
 ulint fold, /*!< in: folded value of the searched data */
-void* data); /*!< in: pointer to the data */
+const rec_t* data); /*!< in: pointer to the data */
 #ifndef UNIV_HOTBACKUP
 /*****************************************************************//**
 Removes from the chain determined by fold all nodes whose data pointer
@@ -217,7 +210,7 @@ struct ha_node_struct {
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 buf_block_t* block; /*!< buffer block containing the data, or NULL */
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-void* data; /*!< pointer to the data */
+const rec_t* data; /*!< pointer to the data */
 ulint fold; /*!< fold value for the data */
 };
......
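The ha0ha.h hunks above tighten the adaptive hash index node API from void* to const rec_t*. As a rough illustration of the fold-based chained hash table that this header declares, here is a minimal, self-contained C sketch; the toy_* names and structure are invented for illustration and are not the InnoDB implementation. It mirrors the insert-for-fold semantics described in the comment above, where an existing node with the same fold value has its data pointer updated rather than a new node being added.

/* Simplified, self-contained sketch (not the InnoDB code): a chained
   hash table whose nodes store a fold value and a const-qualified
   pointer to record data, in the spirit of the ha0ha.h API above. */

#include <stdio.h>
#include <stdlib.h>

typedef unsigned char rec_t;            /* opaque record bytes */

typedef struct toy_node_struct {
        struct toy_node_struct* next;   /* next node in the chain */
        const rec_t*            data;   /* pointer to the data (read-only) */
        unsigned long           fold;   /* fold value for the data */
} toy_node_t;

typedef struct {
        unsigned long   n_cells;
        toy_node_t**    cells;          /* array of chain heads */
} toy_table_t;

static toy_table_t* toy_create(unsigned long n_cells)
{
        toy_table_t*    t = malloc(sizeof(*t));

        t->n_cells = n_cells;
        t->cells = calloc(n_cells, sizeof(toy_node_t*));
        return t;
}

/* Insert data under fold; if a node with the same fold already exists,
   only update its data pointer, as the ha_insert_for_fold() comment
   above describes.  Returns 1 if a new node was created. */
static int toy_insert_for_fold(toy_table_t* t, unsigned long fold,
                               const rec_t* data)
{
        toy_node_t**    head = &t->cells[fold % t->n_cells];
        toy_node_t*     node;

        for (node = *head; node != NULL; node = node->next) {
                if (node->fold == fold) {
                        node->data = data;      /* update in place */
                        return 0;
                }
        }

        node = malloc(sizeof(*node));
        node->next = *head;
        node->data = data;
        node->fold = fold;
        *head = node;
        return 1;
}

/* Look up the data stored under a fold value, NULL if absent. */
static const rec_t* toy_search_and_get_data(const toy_table_t* t,
                                            unsigned long fold)
{
        const toy_node_t*       node = t->cells[fold % t->n_cells];

        for (; node != NULL; node = node->next) {
                if (node->fold == fold) {
                        return node->data;
                }
        }
        return NULL;
}

int main(void)
{
        static const rec_t      rec1[] = "rec1", rec2[] = "rec2";
        toy_table_t*            t = toy_create(16);

        toy_insert_for_fold(t, 42, rec1);
        toy_insert_for_fold(t, 42, rec2);       /* updates, no new node */
        printf("%s\n", (const char*) toy_search_and_get_data(t, 42));
        return 0;       /* memory deliberately not freed in this sketch */
}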
/***************************************************************************** /*****************************************************************************
Copyright (c) 1994, 2009, Innobase Oy. All Rights Reserved. Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software the terms of the GNU General Public License as published by the Free Software
...@@ -25,6 +25,7 @@ Created 8/18/1994 Heikki Tuuri ...@@ -25,6 +25,7 @@ Created 8/18/1994 Heikki Tuuri
#include "ut0rnd.h" #include "ut0rnd.h"
#include "mem0mem.h" #include "mem0mem.h"
#include "btr0types.h"
/***********************************************************//** /***********************************************************//**
Deletes a hash node. */ Deletes a hash node. */
...@@ -39,10 +40,10 @@ ha_delete_hash_node( ...@@ -39,10 +40,10 @@ ha_delete_hash_node(
Gets a hash node data. Gets a hash node data.
@return pointer to the data */ @return pointer to the data */
UNIV_INLINE UNIV_INLINE
void* const rec_t*
ha_node_get_data( ha_node_get_data(
/*=============*/ /*=============*/
ha_node_t* node) /*!< in: hash chain node */ const ha_node_t* node) /*!< in: hash chain node */
{ {
return(node->data); return(node->data);
} }
...@@ -57,7 +58,7 @@ ha_node_set_data_func( ...@@ -57,7 +58,7 @@ ha_node_set_data_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */ buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
void* data) /*!< in: pointer to the data */ const rec_t* data) /*!< in: pointer to the data */
{ {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
node->block = block; node->block = block;
...@@ -105,41 +106,12 @@ ha_chain_get_first( ...@@ -105,41 +106,12 @@ ha_chain_get_first(
hash_get_nth_cell(table, hash_calc_hash(fold, table))->node); hash_get_nth_cell(table, hash_calc_hash(fold, table))->node);
} }
/*************************************************************//**
Looks for an element in a hash table.
@return pointer to the first hash table node in chain having the fold
number, NULL if not found */
UNIV_INLINE
ha_node_t*
ha_search(
/*======*/
hash_table_t* table, /*!< in: hash table */
ulint fold) /*!< in: folded value of the searched data */
{
ha_node_t* node;
ASSERT_HASH_MUTEX_OWN(table, fold);
node = ha_chain_get_first(table, fold);
while (node) {
if (node->fold == fold) {
return(node);
}
node = ha_chain_get_next(node);
}
return(NULL);
}
/*************************************************************//** /*************************************************************//**
Looks for an element in a hash table. Looks for an element in a hash table.
@return pointer to the data of the first hash table node in chain @return pointer to the data of the first hash table node in chain
having the fold number, NULL if not found */ having the fold number, NULL if not found */
UNIV_INLINE UNIV_INLINE
void* const rec_t*
ha_search_and_get_data( ha_search_and_get_data(
/*===================*/ /*===================*/
hash_table_t* table, /*!< in: hash table */ hash_table_t* table, /*!< in: hash table */
...@@ -148,6 +120,10 @@ ha_search_and_get_data( ...@@ -148,6 +120,10 @@ ha_search_and_get_data(
ha_node_t* node; ha_node_t* node;
ASSERT_HASH_MUTEX_OWN(table, fold); ASSERT_HASH_MUTEX_OWN(table, fold);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(btr_search_enabled);
node = ha_chain_get_first(table, fold); node = ha_chain_get_first(table, fold);
...@@ -172,12 +148,14 @@ ha_search_with_data( ...@@ -172,12 +148,14 @@ ha_search_with_data(
/*================*/ /*================*/
hash_table_t* table, /*!< in: hash table */ hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: folded value of the searched data */ ulint fold, /*!< in: folded value of the searched data */
void* data) /*!< in: pointer to the data */ const rec_t* data) /*!< in: pointer to the data */
{ {
ha_node_t* node; ha_node_t* node;
ASSERT_HASH_MUTEX_OWN(table, fold); ASSERT_HASH_MUTEX_OWN(table, fold);
ut_ad(btr_search_enabled);
node = ha_chain_get_first(table, fold); node = ha_chain_get_first(table, fold);
while (node) { while (node) {
...@@ -202,11 +180,15 @@ ha_search_and_delete_if_found( ...@@ -202,11 +180,15 @@ ha_search_and_delete_if_found(
/*==========================*/ /*==========================*/
hash_table_t* table, /*!< in: hash table */ hash_table_t* table, /*!< in: hash table */
ulint fold, /*!< in: folded value of the searched data */ ulint fold, /*!< in: folded value of the searched data */
void* data) /*!< in: pointer to the data */ const rec_t* data) /*!< in: pointer to the data */
{ {
ha_node_t* node; ha_node_t* node;
ASSERT_HASH_MUTEX_OWN(table, fold); ASSERT_HASH_MUTEX_OWN(table, fold);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(btr_search_enabled);
node = ha_search_with_data(table, fold, data); node = ha_search_with_data(table, fold, data);
......
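The ha0ha.ic hunks above add assertions, guarded by UNIV_SYNC_DEBUG, that the caller holds btr_search_latch in the required mode and that the adaptive hash index is still enabled. The following minimal C sketch shows the same pattern of compile-time-guarded latch-ownership assertions; the toy_* names and the TOY_SYNC_DEBUG macro are invented stand-ins, not the InnoDB primitives.

/* Minimal sketch: latch ownership checks compiled in only when a
   sync-debug macro is defined, so release builds pay nothing. */

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
        pthread_rwlock_t        lock;
        pthread_t               writer;         /* valid while write-locked */
        int                     is_write_locked;
} toy_rw_lock_t;

#ifdef TOY_SYNC_DEBUG
/* Debug builds can ask: does the calling thread hold the lock in
   exclusive mode?  Analogous in spirit to rw_lock_own(..., RW_LOCK_EX). */
static int toy_rw_lock_own_x(const toy_rw_lock_t* l)
{
        return l->is_write_locked
                && pthread_equal(l->writer, pthread_self());
}
#endif /* TOY_SYNC_DEBUG */

static int      search_enabled = 1;

static void toy_delete_under_latch(toy_rw_lock_t* latch)
{
#ifdef TOY_SYNC_DEBUG
        assert(toy_rw_lock_own_x(latch));       /* caller holds the X-latch */
#endif /* TOY_SYNC_DEBUG */
        assert(search_enabled);                 /* like ut_ad(btr_search_enabled) */

        /* ... the actual hash maintenance would happen here ... */
        (void) latch;
}

int main(void)
{
        toy_rw_lock_t   latch;

        pthread_rwlock_init(&latch.lock, NULL);
        pthread_rwlock_wrlock(&latch.lock);
        latch.writer = pthread_self();
        latch.is_write_locked = 1;

        toy_delete_under_latch(&latch);

        latch.is_write_locked = 0;
        pthread_rwlock_unlock(&latch.lock);
        puts("done");
        return 0;
}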
...@@ -28,7 +28,6 @@ Created 12/27/1996 Heikki Tuuri ...@@ -28,7 +28,6 @@ Created 12/27/1996 Heikki Tuuri
# include "trx0trx.h" # include "trx0trx.h"
# include "trx0undo.h" # include "trx0undo.h"
# include "row0row.h" # include "row0row.h"
# include "btr0sea.h"
#endif /* !UNIV_HOTBACKUP */ #endif /* !UNIV_HOTBACKUP */
#include "page0zip.h" #include "page0zip.h"
......
...@@ -670,7 +670,6 @@ or row lock! */ ...@@ -670,7 +670,6 @@ or row lock! */
#define SYNC_LOG_FLUSH_ORDER 147 #define SYNC_LOG_FLUSH_ORDER 147
#define SYNC_RECV 168 #define SYNC_RECV 168
#define SYNC_WORK_QUEUE 162 #define SYNC_WORK_QUEUE 162
#define SYNC_SEARCH_SYS_CONF 161 /* for assigning btr_search_enabled */
#define SYNC_SEARCH_SYS 160 /* NOTE that if we have a memory #define SYNC_SEARCH_SYS 160 /* NOTE that if we have a memory
heap that can be extended to the heap that can be extended to the
buffer pool, its logical level is buffer pool, its logical level is
......
...@@ -215,12 +215,6 @@ page_set_max_trx_id( ...@@ -215,12 +215,6 @@ page_set_max_trx_id(
{ {
page_t* page = buf_block_get_frame(block); page_t* page = buf_block_get_frame(block);
#ifndef UNIV_HOTBACKUP #ifndef UNIV_HOTBACKUP
const ibool is_hashed = block->is_hashed;
if (is_hashed) {
rw_lock_x_lock(&btr_search_latch);
}
ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
#endif /* !UNIV_HOTBACKUP */ #endif /* !UNIV_HOTBACKUP */
...@@ -241,12 +235,6 @@ page_set_max_trx_id( ...@@ -241,12 +235,6 @@ page_set_max_trx_id(
} else { } else {
mach_write_to_8(page + (PAGE_HEADER + PAGE_MAX_TRX_ID), trx_id); mach_write_to_8(page + (PAGE_HEADER + PAGE_MAX_TRX_ID), trx_id);
} }
#ifndef UNIV_HOTBACKUP
if (is_hashed) {
rw_lock_x_unlock(&btr_search_latch);
}
#endif /* !UNIV_HOTBACKUP */
} }
/************************************************************//** /************************************************************//**
......
...@@ -1213,7 +1213,6 @@ sync_thread_add_level( ...@@ -1213,7 +1213,6 @@ sync_thread_add_level(
case SYNC_FILE_FORMAT_TAG: case SYNC_FILE_FORMAT_TAG:
case SYNC_DOUBLEWRITE: case SYNC_DOUBLEWRITE:
case SYNC_SEARCH_SYS: case SYNC_SEARCH_SYS:
case SYNC_SEARCH_SYS_CONF:
case SYNC_TRX_LOCK_HEAP: case SYNC_TRX_LOCK_HEAP:
case SYNC_KERNEL: case SYNC_KERNEL:
case SYNC_IBUF_BITMAP_MUTEX: case SYNC_IBUF_BITMAP_MUTEX:
......
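The sync0sync.h and sync0sync.c hunks above drop the SYNC_SEARCH_SYS_CONF level now that btr_search_enabled_mutex is gone; these numeric SYNC_* constants let debug builds verify the global latching order when a thread acquires a latch. As a rough, self-contained C sketch of that idea (the toy_* helpers and the exact ordering rule here are illustrative assumptions, not the InnoDB code):

/* Simplified sketch in the spirit of sync_thread_add_level(): each
   latch has a numeric level, and a debug check refuses an acquisition
   at a level not strictly below every level already held. */

#include <assert.h>
#include <stdio.h>

#define TOY_MAX_HELD    32

/* Levels currently held by the thread (single-threaded demo). */
static int      held_levels[TOY_MAX_HELD];
static int      n_held;

static void toy_add_level(int level)
{
        int     i;

        /* Enforce descending acquisition order: every already-held
           latch must sit at a strictly higher level. */
        for (i = 0; i < n_held; i++) {
                assert(held_levels[i] > level);
        }
        assert(n_held < TOY_MAX_HELD);
        held_levels[n_held++] = level;
}

static void toy_remove_level(int level)
{
        int     i;

        for (i = 0; i < n_held; i++) {
                if (held_levels[i] == level) {
                        held_levels[i] = held_levels[--n_held];
                        return;
                }
        }
        assert(!"releasing a latch that was never recorded");
}

int main(void)
{
        /* Acquire a higher-level latch first, then a lower one: OK. */
        toy_add_level(168 /* e.g. SYNC_RECV */);
        toy_add_level(160 /* e.g. SYNC_SEARCH_SYS */);

        toy_remove_level(160);
        toy_remove_level(168);

        /* Reversing the acquisition order would trip the assertion,
           which is how latching-order bugs are caught in debug builds. */
        puts("latching order respected");
        return 0;
}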