Commit 54b69cac authored by Vadim Tkachenko

disable split_buffer_pool_mutex

parent 8bda156a
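The patch being disabled here had split the single buf_pool_mutex into finer-grained latches (LRU_list_mutex, flush_list_mutex, page_hash_latch, free_list_mutex, zip_free_mutex, zip_hash_mutex) and threaded have_LRU_mutex / have_page_hash_mutex flags through buf_LRU_free_block(), buf_buddy_alloc() and buf_buddy_free(). The hunks below put every call site back on buf_pool_mutex_enter()/buf_pool_mutex_exit() and drop the extra declarations and parameters. A minimal, self-contained pthread sketch of the idea being backed out; all names here are invented for illustration and are not InnoDB code:

#include <pthread.h>
#include <stdio.h>

/* Coarse locking, as restored by this commit: one mutex guards every
   buffer-pool structure, so LRU and flush-list work serializes. */
static pthread_mutex_t buf_pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Split locking, as in the patch being disabled: one lock per structure,
   so threads touching different lists do not contend with each other. */
static pthread_mutex_t lru_list_lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t flush_list_lock = PTHREAD_MUTEX_INITIALIZER;

static long lru_len;    /* stand-in for buf_pool->LRU */
static long flush_len;  /* stand-in for buf_pool->flush_list */

static void lru_add_coarse(void)        /* single-mutex style */
{
	pthread_mutex_lock(&buf_pool_lock);
	lru_len++;
	pthread_mutex_unlock(&buf_pool_lock);
}

static void lru_add_split(void)         /* split-mutex style */
{
	pthread_mutex_lock(&lru_list_lock);
	lru_len++;
	pthread_mutex_unlock(&lru_list_lock);
}

static void flush_add_split(void)       /* does not block LRU users */
{
	pthread_mutex_lock(&flush_list_lock);
	flush_len++;
	pthread_mutex_unlock(&flush_list_lock);
}

int main(void)
{
	lru_add_coarse();
	lru_add_split();
	flush_add_split();
	printf("lru=%ld flush=%ld\n", lru_len, flush_len);
	return 0;
}

The contention the split version avoids is paid for in lock-ordering complexity, which is what the have_*_mutex parameters removed below were managing.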
@@ -3733,8 +3733,7 @@ btr_blob_free(
 	mtr_commit(mtr);
-	//buf_pool_mutex_enter();
-	mutex_enter(&LRU_list_mutex);
+	buf_pool_mutex_enter();
 	mutex_enter(&block->mutex);
 	/* Only free the block if it is still allocated to
@@ -3745,22 +3744,17 @@ btr_blob_free(
 	    && buf_block_get_space(block) == space
 	    && buf_block_get_page_no(block) == page_no) {
-		if (buf_LRU_free_block(&block->page, all, NULL, TRUE)
+		if (buf_LRU_free_block(&block->page, all, NULL)
 		    != BUF_LRU_FREED
-		    && all && block->page.zip.data
-		    /* Now, buf_LRU_free_block() may release mutex temporarily */
-		    && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE
-		    && buf_block_get_space(block) == space
-		    && buf_block_get_page_no(block) == page_no) {
+		    && all && block->page.zip.data) {
 			/* Attempt to deallocate the uncompressed page
 			if the whole block cannot be deallocted. */
-			buf_LRU_free_block(&block->page, FALSE, NULL, TRUE);
+			buf_LRU_free_block(&block->page, FALSE, NULL);
 		}
 	}
-	//buf_pool_mutex_exit();
-	mutex_exit(&LRU_list_mutex);
+	buf_pool_mutex_exit();
 	mutex_exit(&block->mutex);
 }
......
@@ -1731,8 +1731,7 @@ btr_search_validate(void)
 	rec_offs_init(offsets_);
 	rw_lock_x_lock(&btr_search_latch);
-	//buf_pool_mutex_enter();
-	rw_lock_x_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 	cell_count = hash_get_n_cells(btr_search_sys->hash_index);
@@ -1740,13 +1739,11 @@ btr_search_validate(void)
 		/* We release btr_search_latch every once in a while to
 		give other queries a chance to run. */
 		if ((i != 0) && ((i % chunk_size) == 0)) {
-			//buf_pool_mutex_exit();
-			rw_lock_x_unlock(&page_hash_latch);
+			buf_pool_mutex_exit();
 			rw_lock_x_unlock(&btr_search_latch);
 			os_thread_yield();
 			rw_lock_x_lock(&btr_search_latch);
-			//buf_pool_mutex_enter();
-			rw_lock_x_lock(&page_hash_latch);
+			buf_pool_mutex_enter();
 		}
 		node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
@@ -1853,13 +1850,11 @@ btr_search_validate(void)
 		/* We release btr_search_latch every once in a while to
 		give other queries a chance to run. */
 		if (i != 0) {
-			//buf_pool_mutex_exit();
-			rw_lock_x_unlock(&page_hash_latch);
+			buf_pool_mutex_exit();
 			rw_lock_x_unlock(&btr_search_latch);
 			os_thread_yield();
 			rw_lock_x_lock(&btr_search_latch);
-			//buf_pool_mutex_enter();
-			rw_lock_x_lock(&page_hash_latch);
+			buf_pool_mutex_enter();
 		}
 		if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
@@ -1867,8 +1862,7 @@ btr_search_validate(void)
 		}
 	}
-	//buf_pool_mutex_exit();
-	rw_lock_x_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 	rw_lock_x_unlock(&btr_search_latch);
 	if (UNIV_LIKELY_NULL(heap)) {
 		mem_heap_free(heap);
......
@@ -61,8 +61,7 @@ buf_flush_insert_into_flush_list(
 /*=============================*/
 	buf_block_t*	block)	/* in/out: block which is modified */
 {
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
 	      || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
 		  <= block->page.oldest_modification));
@@ -93,8 +92,7 @@ buf_flush_insert_sorted_into_flush_list(
 	buf_page_t*	prev_b;
 	buf_page_t*	b;
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
 	ut_ad(block->page.in_LRU_list);
@@ -136,10 +134,10 @@ buf_flush_ready_for_replace(
 			buf_page_in_file(bpage) and in the LRU list */
 {
 	//ut_ad(buf_pool_mutex_own());
-	//ut_ad(mutex_own(buf_page_get_mutex(bpage)));
-	//ut_ad(bpage->in_LRU_list);
-	if (UNIV_LIKELY(bpage->in_LRU_list && buf_page_in_file(bpage))) {
+	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+	//ut_ad(bpage->in_LRU_list); /* optimistic use */
+	if (UNIV_LIKELY(buf_page_in_file(bpage))) {
 		return(bpage->oldest_modification == 0
 		       && buf_page_get_io_fix(bpage) == BUF_IO_NONE
@@ -172,7 +170,7 @@ buf_flush_ready_for_flush(
 	enum buf_flush flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
 {
 	ut_a(buf_page_in_file(bpage));
-	//ut_ad(buf_pool_mutex_own()); /*optimistic...*/
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 	ut_ad(flush_type == BUF_FLUSH_LRU || BUF_FLUSH_LIST);
@@ -205,8 +203,7 @@ buf_flush_remove(
 /*=============*/
 	buf_page_t*	bpage)	/* in: pointer to the block in question */
 {
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 	ut_ad(bpage->in_flush_list);
 	ut_d(bpage->in_flush_list = FALSE);
@@ -681,9 +678,7 @@ buf_flush_write_block_low(
 	io_fixed and oldest_modification != 0. Thus, it cannot be
 	relocated in the buffer pool or removed from flush_list or
 	LRU_list. */
-	//ut_ad(!buf_pool_mutex_own());
-	ut_ad(!mutex_own(&LRU_list_mutex));
-	ut_ad(!mutex_own(&flush_list_mutex));
+	ut_ad(!buf_pool_mutex_own());
 	ut_ad(!mutex_own(buf_page_get_mutex(bpage)));
 	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
 	ut_ad(bpage->oldest_modification != 0);
@@ -767,19 +762,12 @@ buf_flush_page(
 	ibool		is_uncompressed;
 	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
-	//ut_ad(buf_pool_mutex_own());
-#ifdef UNIV_SYNC_DEBUG
-	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX)
-	      || rw_lock_own(&page_hash_latch, RW_LOCK_SHARED));
-#endif
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(buf_page_in_file(bpage));
 	block_mutex = buf_page_get_mutex(bpage);
 	ut_ad(mutex_own(block_mutex));
-	mutex_enter(&buf_pool_mutex);
-	rw_lock_s_unlock(&page_hash_latch);
 	ut_ad(buf_flush_ready_for_flush(bpage, flush_type));
 	buf_page_set_io_fix(bpage, BUF_IO_WRITE);
@@ -810,8 +798,7 @@ buf_flush_page(
 	}
 	mutex_exit(block_mutex);
-	//buf_pool_mutex_exit();
-	mutex_exit(&buf_pool_mutex);
+	buf_pool_mutex_exit();
 	/* Even though bpage is not protected by any mutex at
 	this point, it is safe to access bpage, because it is
@@ -848,8 +835,7 @@ buf_flush_page(
 		immediately. */
 		mutex_exit(block_mutex);
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 		break;
 	default:
@@ -913,8 +899,7 @@ buf_flush_try_neighbors(
 		high = fil_space_get_size(space);
 	}
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 	for (i = low; i < high; i++) {
@@ -951,16 +936,14 @@ buf_flush_try_neighbors(
 				ut_ad(!mutex_own(block_mutex));
 				count++;
-				//buf_pool_mutex_enter();
-				rw_lock_s_lock(&page_hash_latch);
+				buf_pool_mutex_enter();
 			} else {
 				mutex_exit(block_mutex);
 			}
 		}
 	}
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 	return(count);
 }
@@ -1004,29 +987,20 @@ buf_flush_batch(
 	ut_ad((flush_type != BUF_FLUSH_LIST)
 	      || sync_thread_levels_empty_gen(TRUE));
 #endif /* UNIV_SYNC_DEBUG */
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 	if ((buf_pool->n_flush[flush_type] > 0)
 	    || (buf_pool->init_flush[flush_type] == TRUE)) {
 		/* There is already a flush batch of the same type running */
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 		return(ULINT_UNDEFINED);
 	}
 	buf_pool->init_flush[flush_type] = TRUE;
-	mutex_exit(&buf_pool_mutex);
-	if (flush_type == BUF_FLUSH_LRU) {
-		mutex_enter(&LRU_list_mutex);
-	}
-	mutex_enter(&flush_list_mutex);
 	for (;;) {
flush_next:
 		/* If we have flushed enough, leave the loop */
@@ -1073,11 +1047,7 @@ buf_flush_batch(
 		space = buf_page_get_space(bpage);
 		offset = buf_page_get_page_no(bpage);
-		//buf_pool_mutex_exit();
-		if (flush_type == BUF_FLUSH_LRU) {
-			mutex_exit(&LRU_list_mutex);
-		}
-		mutex_exit(&flush_list_mutex);
+		buf_pool_mutex_exit();
 		old_page_count = page_count;
@@ -1087,8 +1057,7 @@ buf_flush_batch(
 				space, offset, flush_type);
 		} else {
 			/* Try to flush the page only */
-			//buf_pool_mutex_enter();
-			rw_lock_s_lock(&page_hash_latch);
+			buf_pool_mutex_enter();
 			mutex_t* block_mutex = buf_page_get_mutex(bpage);
 			mutex_enter(block_mutex);
@@ -1104,11 +1073,7 @@ buf_flush_batch(
 		flush_type, offset,
 		page_count - old_page_count); */
-		//buf_pool_mutex_enter();
-		if (flush_type == BUF_FLUSH_LRU) {
-			mutex_enter(&LRU_list_mutex);
-		}
-		mutex_enter(&flush_list_mutex);
+		buf_pool_mutex_enter();
 		goto flush_next;
 	} else if (flush_type == BUF_FLUSH_LRU) {
@@ -1126,13 +1091,6 @@ buf_flush_batch(
 		break;
 	}
-	if (flush_type == BUF_FLUSH_LRU) {
-		mutex_exit(&LRU_list_mutex);
-	}
-	mutex_exit(&flush_list_mutex);
-	mutex_enter(&buf_pool_mutex);
 	buf_pool->init_flush[flush_type] = FALSE;
 	if (buf_pool->n_flush[flush_type] == 0) {
@@ -1142,8 +1100,7 @@ buf_flush_batch(
 		os_event_set(buf_pool->no_flush[flush_type]);
 	}
-	//buf_pool_mutex_exit();
-	mutex_exit(&buf_pool_mutex);
+	buf_pool_mutex_exit();
 	buf_flush_buffered_writes();
@@ -1190,14 +1147,8 @@ buf_flush_LRU_recommendation(void)
 	buf_page_t*	bpage;
 	ulint		n_replaceable;
 	ulint		distance = 0;
-	ibool		have_LRU_mutex = FALSE;
-	if(UT_LIST_GET_LEN(buf_pool->unzip_LRU))
-		have_LRU_mutex = TRUE;
 	//buf_pool_mutex_enter();
-	if (have_LRU_mutex)
-		mutex_enter(&LRU_list_mutex);
 	n_replaceable = UT_LIST_GET_LEN(buf_pool->free);
@@ -1208,12 +1159,6 @@ buf_flush_LRU_recommendation(void)
 		   + BUF_FLUSH_EXTRA_MARGIN)
 	       && (distance < BUF_LRU_FREE_SEARCH_LEN)) {
-		if (!bpage->in_LRU_list) {
-			/* reatart. but it is very optimistic */
-			bpage = UT_LIST_GET_LAST(buf_pool->LRU);
-			continue;
-		}
 		mutex_t* block_mutex = buf_page_get_mutex(bpage);
 		mutex_enter(block_mutex);
@@ -1230,8 +1175,6 @@ buf_flush_LRU_recommendation(void)
 	}
 	//buf_pool_mutex_exit();
-	if (have_LRU_mutex)
-		mutex_exit(&LRU_list_mutex);
 	if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) {
@@ -1309,13 +1252,11 @@ buf_flush_validate(void)
 {
 	ibool	ret;
-	//buf_pool_mutex_enter();
-	mutex_enter(&flush_list_mutex);
+	buf_pool_mutex_enter();
 	ret = buf_flush_validate_low();
-	//buf_pool_mutex_exit();
-	mutex_exit(&flush_list_mutex);
+	buf_pool_mutex_exit();
 	return(ret);
 }
......
@@ -246,22 +246,18 @@ buf_read_ahead_random(
 	LRU_recent_limit = buf_LRU_get_recent_limit();
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 	if (buf_pool->n_pend_reads
 	    > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 		return(0);
 	}
-	mutex_exit(&buf_pool_mutex);
 	/* Count how many blocks in the area have been recently accessed,
 	that is, reside near the start of the LRU list. */
-	rw_lock_s_lock(&page_hash_latch);
 	for (i = low; i < high; i++) {
 		const buf_page_t* bpage = buf_page_hash_get(space, i);
@@ -273,15 +269,13 @@ buf_read_ahead_random(
 			if (recent_blocks >= BUF_READ_AHEAD_RANDOM_THRESHOLD) {
-				//buf_pool_mutex_exit();
-				rw_lock_s_unlock(&page_hash_latch);
+				buf_pool_mutex_exit();
 				goto read_ahead;
 			}
 		}
 	}
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 	/* Do nothing */
 	return(0);
@@ -475,12 +469,10 @@ buf_read_ahead_linear(
 	tablespace_version = fil_space_get_version(space);
-	//buf_pool_mutex_enter();
-	mutex_enter(&buf_pool_mutex);
+	buf_pool_mutex_enter();
 	if (high > fil_space_get_size(space)) {
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 		/* The area is not whole, return */
 		return(0);
@@ -488,12 +480,10 @@ buf_read_ahead_linear(
 	if (buf_pool->n_pend_reads
 	    > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
-		//buf_pool_mutex_exit();
-		mutex_exit(&buf_pool_mutex);
+		buf_pool_mutex_exit();
 		return(0);
 	}
-	mutex_exit(&buf_pool_mutex);
 	/* Check that almost all pages in the area have been accessed; if
 	offset == low, the accesses must be in a descending order, otherwise,
@@ -507,7 +497,6 @@ buf_read_ahead_linear(
 	fail_count = 0;
-	rw_lock_s_lock(&page_hash_latch);
 	for (i = low; i < high; i++) {
 		bpage = buf_page_hash_get(space, i);
@@ -531,8 +520,7 @@ buf_read_ahead_linear(
 		    * LINEAR_AREA_THRESHOLD_COEF) {
 			/* Too many failures: return */
-			//buf_pool_mutex_exit();
-			rw_lock_s_unlock(&page_hash_latch);
+			buf_pool_mutex_exit();
 			return(0);
 		}
@@ -543,8 +531,7 @@ buf_read_ahead_linear(
 	bpage = buf_page_hash_get(space, offset);
 	if (bpage == NULL) {
-		//buf_pool_mutex_exit();
-		rw_lock_s_unlock(&page_hash_latch);
+		buf_pool_mutex_exit();
 		return(0);
 	}
@@ -570,8 +557,7 @@ buf_read_ahead_linear(
 	pred_offset = fil_page_get_prev(frame);
 	succ_offset = fil_page_get_next(frame);
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 	if ((offset == low) && (succ_offset == offset + 1)) {
......
@@ -2282,8 +2282,7 @@ i_s_cmpmem_fill_low(
 	RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
-	//buf_pool_mutex_enter();
-	mutex_enter(&zip_free_mutex);
+	buf_pool_mutex_enter();
 	for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
 		buf_buddy_stat_t*	buddy_stat = &buf_buddy_stat[x];
@@ -2309,8 +2308,7 @@ i_s_cmpmem_fill_low(
 		}
 	}
-	//buf_pool_mutex_exit();
-	mutex_exit(&zip_free_mutex);
+	buf_pool_mutex_exit();
 	DBUG_RETURN(status);
 }
......
@@ -28,7 +28,6 @@ struct innodb_enhancement {
 {"innodb_io","Improvements to InnoDB IO","","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_opt_lru_count","Fix of buffer_pool mutex","Decreases contention on buffer_pool mutex on LRU operations","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_buffer_pool_pages","Information of buffer pool content","","http://www.percona.com/docs/wiki/percona-xtradb"},
-{"innodb_split_buf_pool_mutex","More fix of buffer_pool mutex","Spliting buf_pool_mutex and optimizing based on innodb_opt_lru_count","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_expand_undo_slots","expandable maximum number of undo slots","from 1024 (default) to about 4000","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_extra_rseg","allow to create extra rollback segments","When create new db, the new parameter allows to create more rollback segments","http://www.percona.com/docs/wiki/percona-xtradb"},
 {"innodb_overwrite_relay_log_info","overwrite relay-log.info when slave recovery","Building as plugin, it is not used.","http://www.percona.com/docs/wiki/percona-xtradb:innodb_overwrite_relay_log_info"},
......
@@ -49,11 +49,10 @@ buf_buddy_alloc(
 				/* out: allocated block,
 				possibly NULL if lru == NULL */
 	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool*	lru,	/* in: pointer to a variable that will be assigned
+	ibool*	lru)	/* in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
 			and buf_pool_mutex was temporarily released,
 			or NULL if the LRU list should not be used */
-	ibool	have_page_hash_mutex)
 	__attribute__((malloc));
 /**************************************************************************
@@ -64,8 +63,7 @@ buf_buddy_free(
 /*===========*/
 	void*	buf,	/* in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool	have_page_hash_mutex)
+	ulint	size)	/* in: block size, up to UNIV_PAGE_SIZE */
 	__attribute__((nonnull));
 /** Statistics of buddy blocks of a given size. */
......
@@ -44,11 +44,10 @@ buf_buddy_alloc_low(
 			possibly NULL if lru==NULL */
 	ulint	i,	/* in: index of buf_pool->zip_free[],
 			or BUF_BUDDY_SIZES */
-	ibool*	lru,	/* in: pointer to a variable that will be assigned
+	ibool*	lru)	/* in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
 			and buf_pool_mutex was temporarily released,
 			or NULL if the LRU list should not be used */
-	ibool	have_page_hash_mutex)
 	__attribute__((malloc));
 /**************************************************************************
@@ -59,9 +58,8 @@ buf_buddy_free_low(
 /*===============*/
 	void*	buf,	/* in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	i,	/* in: index of buf_pool->zip_free[],
-			or BUF_BUDDY_SIZES */
-	ibool	have_page_hash_mutex)
+	ulint	i)	/* in: index of buf_pool->zip_free[],
+			or BUF_BUDDY_SIZES */
 	__attribute__((nonnull));
 /**************************************************************************
@@ -100,15 +98,14 @@ buf_buddy_alloc(
 				/* out: allocated block,
 				possibly NULL if lru == NULL */
 	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool*	lru,	/* in: pointer to a variable that will be assigned
+	ibool*	lru)	/* in: pointer to a variable that will be assigned
 			TRUE if storage was allocated from the LRU list
 			and buf_pool_mutex was temporarily released,
 			or NULL if the LRU list should not be used */
-	ibool	have_page_hash_mutex)
 {
-	//ut_ad(buf_pool_mutex_own());
-	return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru, have_page_hash_mutex));
+	ut_ad(buf_pool_mutex_own());
+	return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru));
 }
 /**************************************************************************
@@ -119,26 +116,11 @@ buf_buddy_free(
 /*===========*/
 	void*	buf,	/* in: block to be freed, must not be
 			pointed to by the buffer pool */
-	ulint	size,	/* in: block size, up to UNIV_PAGE_SIZE */
-	ibool	have_page_hash_mutex)
+	ulint	size)	/* in: block size, up to UNIV_PAGE_SIZE */
 {
-	//ut_ad(buf_pool_mutex_own());
-	if (!have_page_hash_mutex) {
-		mutex_enter(&LRU_list_mutex);
-		mutex_enter(&flush_list_mutex);
-		rw_lock_x_lock(&page_hash_latch);
-	}
-	mutex_enter(&zip_free_mutex);
-	buf_buddy_free_low(buf, buf_buddy_get_slot(size), TRUE);
-	mutex_exit(&zip_free_mutex);
-	if (!have_page_hash_mutex) {
-		mutex_exit(&LRU_list_mutex);
-		mutex_exit(&flush_list_mutex);
-		rw_lock_x_unlock(&page_hash_latch);
-	}
+	ut_ad(buf_pool_mutex_own());
+	buf_buddy_free_low(buf, buf_buddy_get_slot(size));
 }
 #ifdef UNIV_MATERIALIZE
......
@@ -1061,10 +1061,10 @@ struct buf_page_struct{
 	UT_LIST_NODE_T(buf_page_t) LRU;
 					/* node of the LRU list */
-//#ifdef UNIV_DEBUG
+#ifdef UNIV_DEBUG
 	ibool		in_LRU_list;	/* TRUE if the page is in the LRU list;
 					used in debugging */
-//#endif /* UNIV_DEBUG */
+#endif /* UNIV_DEBUG */
 	unsigned	old:1;		/* TRUE if the block is in the old
 					blocks in the LRU list */
 	unsigned	LRU_position:31;/* value which monotonically decreases
@@ -1104,11 +1104,11 @@ struct buf_block_struct{
 					a block is in the unzip_LRU list
 					if page.state == BUF_BLOCK_FILE_PAGE
 					and page.zip.data != NULL */
-//#ifdef UNIV_DEBUG
+#ifdef UNIV_DEBUG
 	ibool		in_unzip_LRU_list;/* TRUE if the page is in the
 					decompressed LRU list;
 					used in debugging */
-//#endif /* UNIV_DEBUG */
+#endif /* UNIV_DEBUG */
 	byte*		frame;		/* pointer to buffer frame which
 					is of size UNIV_PAGE_SIZE, and
 					aligned to an address divisible by
@@ -1316,12 +1316,6 @@ struct buf_pool_struct{
 /* mutex protecting the buffer pool struct and control blocks, except the
 read-write lock in them */
 extern mutex_t	buf_pool_mutex;
-extern mutex_t	LRU_list_mutex;
-extern mutex_t	flush_list_mutex;
-extern rw_lock_t	page_hash_latch;
-extern mutex_t	free_list_mutex;
-extern mutex_t	zip_free_mutex;
-extern mutex_t	zip_hash_mutex;
 /* mutex protecting the control blocks of compressed-only pages
 (of type buf_page_t, not buf_block_t) */
 extern mutex_t	buf_pool_zip_mutex;
......
@@ -100,8 +100,7 @@ buf_pool_get_oldest_modification(void)
 	buf_page_t*	bpage;
 	ib_uint64_t	lsn;
-	//buf_pool_mutex_enter();
-	mutex_enter(&flush_list_mutex);
+	buf_pool_mutex_enter();
 	bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
@@ -112,8 +111,7 @@ buf_pool_get_oldest_modification(void)
 		lsn = bpage->oldest_modification;
 	}
-	//buf_pool_mutex_exit();
-	mutex_exit(&flush_list_mutex);
+	buf_pool_mutex_exit();
 	/* The returned answer may be out of date: the flush_list can
 	change after the mutex has been released. */
@@ -130,8 +128,7 @@ buf_pool_clock_tic(void)
 /*====================*/
 			/* out: new clock value */
 {
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	buf_pool->ulint_clock++;
@@ -249,7 +246,7 @@ buf_page_in_file(
 	case BUF_BLOCK_ZIP_FREE:
 		/* This is a free page in buf_pool->zip_free[].
 		Such pages should only be accessed by the buddy allocator. */
-		/* ut_error; */ /* optimistic */
+		ut_error;
 		break;
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
@@ -291,7 +288,7 @@ buf_page_get_LRU_position(
 	const buf_page_t*	bpage)	/* in: control block */
 {
 	ut_ad(buf_page_in_file(bpage));
-	//ut_ad(buf_pool_mutex_own()); /* This is used in optimistic */
+	ut_ad(buf_pool_mutex_own());
 	return(bpage->LRU_position);
 }
@@ -308,7 +305,7 @@ buf_page_get_mutex(
 {
 	switch (buf_page_get_state(bpage)) {
 	case BUF_BLOCK_ZIP_FREE:
-		/* ut_error; */ /* optimistic */
+		ut_error;
 		return(NULL);
 	case BUF_BLOCK_ZIP_PAGE:
 	case BUF_BLOCK_ZIP_DIRTY:
@@ -413,7 +410,7 @@ buf_page_set_io_fix(
 	buf_page_t*	bpage,	/* in/out: control block */
 	enum buf_io_fix	io_fix)	/* in: io_fix state */
 {
-	//ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 	bpage->io_fix = io_fix;
@@ -441,13 +438,12 @@ buf_page_can_relocate(
 /*==================*/
 	const buf_page_t*	bpage)	/* control block being relocated */
 {
-	//ut_ad(buf_pool_mutex_own());
-	/* optimistic */
-	//ut_ad(mutex_own(buf_page_get_mutex(bpage)));
-	//ut_ad(buf_page_in_file(bpage));
-	//ut_ad(bpage->in_LRU_list);
-	return(bpage->in_LRU_list && bpage->io_fix == BUF_IO_NONE
+	ut_ad(buf_pool_mutex_own());
+	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+	ut_ad(buf_page_in_file(bpage));
+	ut_ad(bpage->in_LRU_list);
+	return(buf_page_get_io_fix(bpage) == BUF_IO_NONE
 	       && bpage->buf_fix_count == 0);
 }
@@ -461,7 +457,7 @@ buf_page_is_old(
 	const buf_page_t*	bpage)	/* in: control block */
 {
 	ut_ad(buf_page_in_file(bpage));
-	//ut_ad(buf_pool_mutex_own()); /* This is used in optimistic */
+	ut_ad(buf_pool_mutex_own());
 	return(bpage->old);
 }
@@ -476,8 +472,7 @@ buf_page_set_old(
 	ibool		old)	/* in: old */
 {
 	ut_a(buf_page_in_file(bpage));
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&LRU_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(bpage->in_LRU_list);
 #ifdef UNIV_LRU_DEBUG
@@ -733,17 +728,17 @@ buf_block_free(
 /*===========*/
 	buf_block_t*	block)	/* in, own: block to be freed */
 {
-	//buf_pool_mutex_enter();
+	buf_pool_mutex_enter();
 	mutex_enter(&block->mutex);
 	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
-	buf_LRU_block_free_non_file_page(block, FALSE);
+	buf_LRU_block_free_non_file_page(block);
 	mutex_exit(&block->mutex);
-	//buf_pool_mutex_exit();
+	buf_pool_mutex_exit();
 }
@@ -788,17 +783,14 @@ buf_page_io_query(
 	buf_page_t*	bpage)	/* in: buf_pool block, must be bufferfixed */
 {
 	ibool	io_fixed;
-	mutex_t*	block_mutex = buf_page_get_mutex(bpage);
-	//buf_pool_mutex_enter();
-	mutex_enter(block_mutex);
+	buf_pool_mutex_enter();
 	ut_ad(buf_page_in_file(bpage));
 	ut_ad(bpage->buf_fix_count > 0);
 	io_fixed = buf_page_get_io_fix(bpage) != BUF_IO_NONE;
-	//buf_pool_mutex_exit();
-	mutex_exit(block_mutex);
+	buf_pool_mutex_exit();
 	return(io_fixed);
 }
@@ -925,11 +917,7 @@ buf_page_hash_get(
 	ulint		fold;
 	ut_ad(buf_pool);
-	//ut_ad(buf_pool_mutex_own());
-#ifdef UNIV_SYNC_DEBUG
-	ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX)
-	      || rw_lock_own(&page_hash_latch, RW_LOCK_SHARED));
-#endif
+	ut_ad(buf_pool_mutex_own());
 	/* Look for the page in the hash table */
@@ -978,13 +966,11 @@ buf_page_peek(
 {
 	const buf_page_t*	bpage;
-	//buf_pool_mutex_enter();
-	rw_lock_s_lock(&page_hash_latch);
+	buf_pool_mutex_enter();
 	bpage = buf_page_hash_get(space, offset);
-	//buf_pool_mutex_exit();
-	rw_lock_s_unlock(&page_hash_latch);
+	buf_pool_mutex_exit();
 	return(bpage != NULL);
 }
@@ -1047,17 +1033,12 @@ buf_page_release(
 	ut_a(block->page.buf_fix_count > 0);
 	if (rw_latch == RW_X_LATCH && mtr->modifications) {
-		//buf_pool_mutex_enter();
-		mutex_enter(&flush_list_mutex);
-		mutex_enter(&block->mutex);
-		ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
+		buf_pool_mutex_enter();
 		buf_flush_note_modification(block, mtr);
-		//buf_pool_mutex_exit();
-		mutex_exit(&flush_list_mutex);
+		buf_pool_mutex_exit();
 	}
-	else {
 	mutex_enter(&block->mutex);
-	}
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_s_unlock(&(block->debug_latch));
......
@@ -59,8 +59,7 @@ buf_flush_note_modification(
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-	//ut_ad(buf_pool_mutex_own());
-	ut_ad(mutex_own(&flush_list_mutex));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mtr->start_lsn != 0);
 	ut_ad(mtr->modifications);
@@ -100,8 +99,7 @@ buf_flush_recv_note_modification(
 	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
 #endif /* UNIV_SYNC_DEBUG */
-	//buf_pool_mutex_enter();
-	mutex_enter(&flush_list_mutex);
+	buf_pool_mutex_enter();
 	ut_ad(block->page.newest_modification <= end_lsn);
@@ -118,6 +116,5 @@ buf_flush_recv_note_modification(
 		ut_ad(block->page.oldest_modification <= start_lsn);
 	}
-	//buf_pool_mutex_exit();
-	mutex_exit(&flush_list_mutex);
+	buf_pool_mutex_exit();
 }
@@ -122,11 +122,10 @@ buf_LRU_free_block(
 	buf_page_t*	bpage,	/* in: block to be freed */
 	ibool		zip,	/* in: TRUE if should remove also the
 				compressed page of an uncompressed page */
-	ibool*		buf_pool_mutex_released,
+	ibool*		buf_pool_mutex_released);
 				/* in: pointer to a variable that will
 				be assigned TRUE if buf_pool_mutex
 				was temporarily released, or NULL */
-	ibool		have_LRU_mutex);
 /**********************************************************************
 Try to free a replaceable block. */
 UNIV_INTERN
@@ -170,8 +169,7 @@ UNIV_INTERN
 void
 buf_LRU_block_free_non_file_page(
 /*=============================*/
-	buf_block_t*	block,	/* in: block, must not contain a file page */
-	ibool		have_page_hash_mutex);
+	buf_block_t*	block);	/* in: block, must not contain a file page */
 /**********************************************************************
 Adds a block to the LRU list. */
 UNIV_INTERN
......
@@ -35,7 +35,7 @@ Created 1/20/1994 Heikki Tuuri
 #define INNODB_VERSION_MAJOR	1
 #define INNODB_VERSION_MINOR	0
 #define INNODB_VERSION_BUGFIX	3
-#define PERCONA_INNODB_VERSION 5
+#define PERCONA_INNODB_VERSION 5a
 /* The following is the InnoDB version as shown in
 SELECT plugin_version FROM information_schema.plugins;
......