Commit 54b69cac authored by Vadim Tkachenko

disable split_buffer_pool_mutex

parent 8bda156a
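This commit backs out the innodb_split_buf_pool_mutex patch: call sites that took the fine-grained LRU_list_mutex, flush_list_mutex, zip_free_mutex, or the page_hash_latch rw-lock go back to the single global buf_pool_mutex, the relaxed "optimistic" debug assertions are restored, and the extra have_LRU_mutex / have_page_hash_mutex parameters are dropped from buf_LRU_free_block(), buf_LRU_block_free_non_file_page(), buf_buddy_alloc(), and buf_buddy_free(). As orientation, the two locking disciplines look roughly as follows; this is a minimal sketch paraphrasing the btr_blob_free() hunk below, not the literal patched source:

    /* Split scheme (being disabled): lock only the list being
       touched, and tell callees which latches are already held. */
    mutex_enter(&LRU_list_mutex);
    mutex_enter(&block->mutex);
    if (buf_LRU_free_block(&block->page, all, NULL,
                           TRUE /* have_LRU_mutex */) != BUF_LRU_FREED) {
        /* ... */
    }
    mutex_exit(&LRU_list_mutex);
    mutex_exit(&block->mutex);

    /* Restored scheme: one global mutex covers every buffer-pool
       list, so no hold-state has to be passed around. */
    buf_pool_mutex_enter();
    mutex_enter(&block->mutex);
    if (buf_LRU_free_block(&block->page, all, NULL) != BUF_LRU_FREED) {
        /* ... */
    }
    buf_pool_mutex_exit();
    mutex_exit(&block->mutex);

The trade-off is contention for simplicity: every LRU, flush-list, free-list, and page-hash operation once again serializes on buf_pool_mutex.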
......@@ -3733,8 +3733,7 @@ btr_blob_free(
mtr_commit(mtr);
- //buf_pool_mutex_enter();
- mutex_enter(&LRU_list_mutex);
+ buf_pool_mutex_enter();
mutex_enter(&block->mutex);
/* Only free the block if it is still allocated to
......@@ -3745,22 +3744,17 @@ btr_blob_free(
&& buf_block_get_space(block) == space
&& buf_block_get_page_no(block) == page_no) {
- if (buf_LRU_free_block(&block->page, all, NULL, TRUE)
+ if (buf_LRU_free_block(&block->page, all, NULL)
!= BUF_LRU_FREED
- && all && block->page.zip.data
- /* Now, buf_LRU_free_block() may release mutex temporarily */
- && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE
- && buf_block_get_space(block) == space
- && buf_block_get_page_no(block) == page_no) {
+ && all && block->page.zip.data) {
/* Attempt to deallocate the uncompressed page
if the whole block cannot be deallocated. */
- buf_LRU_free_block(&block->page, FALSE, NULL, TRUE);
+ buf_LRU_free_block(&block->page, FALSE, NULL);
}
}
- //buf_pool_mutex_exit();
- mutex_exit(&LRU_list_mutex);
+ buf_pool_mutex_exit();
mutex_exit(&block->mutex);
}
......
......@@ -1731,8 +1731,7 @@ btr_search_validate(void)
rec_offs_init(offsets_);
rw_lock_x_lock(&btr_search_latch);
- //buf_pool_mutex_enter();
- rw_lock_x_lock(&page_hash_latch);
+ buf_pool_mutex_enter();
cell_count = hash_get_n_cells(btr_search_sys->hash_index);
......@@ -1740,13 +1739,11 @@ btr_search_validate(void)
/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
if ((i != 0) && ((i % chunk_size) == 0)) {
- //buf_pool_mutex_exit();
- rw_lock_x_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
rw_lock_x_unlock(&btr_search_latch);
os_thread_yield();
rw_lock_x_lock(&btr_search_latch);
- //buf_pool_mutex_enter();
- rw_lock_x_lock(&page_hash_latch);
+ buf_pool_mutex_enter();
}
node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
......@@ -1853,13 +1850,11 @@ btr_search_validate(void)
/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
if (i != 0) {
- //buf_pool_mutex_exit();
- rw_lock_x_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
rw_lock_x_unlock(&btr_search_latch);
os_thread_yield();
rw_lock_x_lock(&btr_search_latch);
- //buf_pool_mutex_enter();
- rw_lock_x_lock(&page_hash_latch);
+ buf_pool_mutex_enter();
}
if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
......@@ -1867,8 +1862,7 @@ btr_search_validate(void)
}
}
- //buf_pool_mutex_exit();
- rw_lock_x_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
rw_lock_x_unlock(&btr_search_latch);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
......
......@@ -61,8 +61,7 @@ buf_flush_insert_into_flush_list(
/*=============================*/
buf_block_t* block) /* in/out: block which is modified */
{
- //ut_ad(buf_pool_mutex_own());
- ut_ad(mutex_own(&flush_list_mutex));
+ ut_ad(buf_pool_mutex_own());
ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
|| (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
<= block->page.oldest_modification));
......@@ -93,8 +92,7 @@ buf_flush_insert_sorted_into_flush_list(
buf_page_t* prev_b;
buf_page_t* b;
- //ut_ad(buf_pool_mutex_own());
- ut_ad(mutex_own(&flush_list_mutex));
+ ut_ad(buf_pool_mutex_own());
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->page.in_LRU_list);
......@@ -136,10 +134,10 @@ buf_flush_ready_for_replace(
buf_page_in_file(bpage) and in the LRU list */
{
//ut_ad(buf_pool_mutex_own());
- //ut_ad(mutex_own(buf_page_get_mutex(bpage)));
- //ut_ad(bpage->in_LRU_list);
+ ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+ //ut_ad(bpage->in_LRU_list); /* optimistic use */
- if (UNIV_LIKELY(bpage->in_LRU_list && buf_page_in_file(bpage))) {
+ if (UNIV_LIKELY(buf_page_in_file(bpage))) {
return(bpage->oldest_modification == 0
&& buf_page_get_io_fix(bpage) == BUF_IO_NONE
......@@ -172,7 +170,7 @@ buf_flush_ready_for_flush(
enum buf_flush flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{
ut_a(buf_page_in_file(bpage));
- //ut_ad(buf_pool_mutex_own()); /*optimistic...*/
+ ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
......@@ -205,8 +203,7 @@ buf_flush_remove(
/*=============*/
buf_page_t* bpage) /* in: pointer to the block in question */
{
- //ut_ad(buf_pool_mutex_own());
- ut_ad(mutex_own(&flush_list_mutex));
+ ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
ut_ad(bpage->in_flush_list);
ut_d(bpage->in_flush_list = FALSE);
......@@ -681,9 +678,7 @@ buf_flush_write_block_low(
io_fixed and oldest_modification != 0. Thus, it cannot be
relocated in the buffer pool or removed from flush_list or
LRU_list. */
- //ut_ad(!buf_pool_mutex_own());
- ut_ad(!mutex_own(&LRU_list_mutex));
- ut_ad(!mutex_own(&flush_list_mutex));
+ ut_ad(!buf_pool_mutex_own());
ut_ad(!mutex_own(buf_page_get_mutex(bpage)));
ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_WRITE);
ut_ad(bpage->oldest_modification != 0);
......@@ -767,19 +762,12 @@ buf_flush_page(
ibool is_uncompressed;
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
- //ut_ad(buf_pool_mutex_own());
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX)
-       || rw_lock_own(&page_hash_latch, RW_LOCK_SHARED));
- #endif
+ ut_ad(buf_pool_mutex_own());
ut_ad(buf_page_in_file(bpage));
block_mutex = buf_page_get_mutex(bpage);
ut_ad(mutex_own(block_mutex));
- mutex_enter(&buf_pool_mutex);
- rw_lock_s_unlock(&page_hash_latch);
ut_ad(buf_flush_ready_for_flush(bpage, flush_type));
buf_page_set_io_fix(bpage, BUF_IO_WRITE);
......@@ -810,8 +798,7 @@ buf_flush_page(
}
mutex_exit(block_mutex);
- //buf_pool_mutex_exit();
- mutex_exit(&buf_pool_mutex);
+ buf_pool_mutex_exit();
/* Even though bpage is not protected by any mutex at
this point, it is safe to access bpage, because it is
......@@ -848,8 +835,7 @@ buf_flush_page(
immediately. */
mutex_exit(block_mutex);
- //buf_pool_mutex_exit();
- mutex_exit(&buf_pool_mutex);
+ buf_pool_mutex_exit();
break;
default:
......@@ -913,8 +899,7 @@ buf_flush_try_neighbors(
high = fil_space_get_size(space);
}
- //buf_pool_mutex_enter();
- rw_lock_s_lock(&page_hash_latch);
+ buf_pool_mutex_enter();
for (i = low; i < high; i++) {
......@@ -951,16 +936,14 @@ buf_flush_try_neighbors(
ut_ad(!mutex_own(block_mutex));
count++;
- //buf_pool_mutex_enter();
- rw_lock_s_lock(&page_hash_latch);
+ buf_pool_mutex_enter();
} else {
mutex_exit(block_mutex);
}
}
}
- //buf_pool_mutex_exit();
- rw_lock_s_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
return(count);
}
......@@ -1004,29 +987,20 @@ buf_flush_batch(
ut_ad((flush_type != BUF_FLUSH_LIST)
|| sync_thread_levels_empty_gen(TRUE));
#endif /* UNIV_SYNC_DEBUG */
- //buf_pool_mutex_enter();
- mutex_enter(&buf_pool_mutex);
+ buf_pool_mutex_enter();
if ((buf_pool->n_flush[flush_type] > 0)
|| (buf_pool->init_flush[flush_type] == TRUE)) {
/* There is already a flush batch of the same type running */
- //buf_pool_mutex_exit();
- mutex_exit(&buf_pool_mutex);
+ buf_pool_mutex_exit();
return(ULINT_UNDEFINED);
}
buf_pool->init_flush[flush_type] = TRUE;
- mutex_exit(&buf_pool_mutex);
- if (flush_type == BUF_FLUSH_LRU) {
- 	mutex_enter(&LRU_list_mutex);
- }
- mutex_enter(&flush_list_mutex);
for (;;) {
flush_next:
/* If we have flushed enough, leave the loop */
......@@ -1073,11 +1047,7 @@ buf_flush_batch(
space = buf_page_get_space(bpage);
offset = buf_page_get_page_no(bpage);
- //buf_pool_mutex_exit();
- if (flush_type == BUF_FLUSH_LRU) {
- 	mutex_exit(&LRU_list_mutex);
- }
- mutex_exit(&flush_list_mutex);
+ buf_pool_mutex_exit();
old_page_count = page_count;
......@@ -1087,8 +1057,7 @@ buf_flush_batch(
space, offset, flush_type);
} else {
/* Try to flush the page only */
- //buf_pool_mutex_enter();
- rw_lock_s_lock(&page_hash_latch);
+ buf_pool_mutex_enter();
mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
......@@ -1104,11 +1073,7 @@ buf_flush_batch(
flush_type, offset,
page_count - old_page_count); */
- //buf_pool_mutex_enter();
- if (flush_type == BUF_FLUSH_LRU) {
- 	mutex_enter(&LRU_list_mutex);
- }
- mutex_enter(&flush_list_mutex);
+ buf_pool_mutex_enter();
goto flush_next;
} else if (flush_type == BUF_FLUSH_LRU) {
......@@ -1126,13 +1091,6 @@ buf_flush_batch(
break;
}
- if (flush_type == BUF_FLUSH_LRU) {
- 	mutex_exit(&LRU_list_mutex);
- }
- mutex_exit(&flush_list_mutex);
- mutex_enter(&buf_pool_mutex);
buf_pool->init_flush[flush_type] = FALSE;
if (buf_pool->n_flush[flush_type] == 0) {
......@@ -1142,8 +1100,7 @@ buf_flush_batch(
os_event_set(buf_pool->no_flush[flush_type]);
}
- //buf_pool_mutex_exit();
- mutex_exit(&buf_pool_mutex);
+ buf_pool_mutex_exit();
buf_flush_buffered_writes();
......@@ -1190,14 +1147,8 @@ buf_flush_LRU_recommendation(void)
buf_page_t* bpage;
ulint n_replaceable;
ulint distance = 0;
- ibool have_LRU_mutex = FALSE;
- if(UT_LIST_GET_LEN(buf_pool->unzip_LRU))
- 	have_LRU_mutex = TRUE;
- //buf_pool_mutex_enter();
- if (have_LRU_mutex)
- 	mutex_enter(&LRU_list_mutex);
+ buf_pool_mutex_enter();
n_replaceable = UT_LIST_GET_LEN(buf_pool->free);
......@@ -1208,12 +1159,6 @@ buf_flush_LRU_recommendation(void)
+ BUF_FLUSH_EXTRA_MARGIN)
&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {
- if (!bpage->in_LRU_list) {
- 	/* restart, but it is very optimistic */
- 	bpage = UT_LIST_GET_LAST(buf_pool->LRU);
- 	continue;
- }
mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
......@@ -1230,8 +1175,6 @@ buf_flush_LRU_recommendation(void)
}
- //buf_pool_mutex_exit();
- if (have_LRU_mutex)
- 	mutex_exit(&LRU_list_mutex);
+ buf_pool_mutex_exit();
if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) {
......@@ -1309,13 +1252,11 @@ buf_flush_validate(void)
{
ibool ret;
- //buf_pool_mutex_enter();
- mutex_enter(&flush_list_mutex);
+ buf_pool_mutex_enter();
ret = buf_flush_validate_low();
- //buf_pool_mutex_exit();
- mutex_exit(&flush_list_mutex);
+ buf_pool_mutex_exit();
return(ret);
}
......
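The buf_flush_batch() hunks above carry the largest structural change in this file: under the split scheme the function held flush_list_mutex for the whole scan, added LRU_list_mutex only for BUF_FLUSH_LRU batches, and released and re-took that latch set around every page it dispatched. A condensed sketch of the removed pattern, paraphrased from the hunks rather than copied from the source:

    /* Split scheme inside buf_flush_batch(): the latch set
       depends on the batch type. */
    if (flush_type == BUF_FLUSH_LRU) {
        mutex_enter(&LRU_list_mutex);   /* LRU batches walk the LRU list */
    }
    mutex_enter(&flush_list_mutex);     /* both batch types walk flush_list */
    for (;;) {
        /* ... choose a dirty page, drop both latches, flush it,
           re-acquire the latches, and rescan ... */
    }

After the revert the loop simply drops and re-takes buf_pool_mutex at the same points, which is why every removed enter/exit pair above collapses to a single buf_pool_mutex_enter() or buf_pool_mutex_exit().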
......@@ -246,22 +246,18 @@ buf_read_ahead_random(
LRU_recent_limit = buf_LRU_get_recent_limit();
- //buf_pool_mutex_enter();
- mutex_enter(&buf_pool_mutex);
+ buf_pool_mutex_enter();
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- //buf_pool_mutex_exit();
- mutex_exit(&buf_pool_mutex);
+ buf_pool_mutex_exit();
return(0);
}
- mutex_exit(&buf_pool_mutex);
/* Count how many blocks in the area have been recently accessed,
that is, reside near the start of the LRU list. */
- rw_lock_s_lock(&page_hash_latch);
for (i = low; i < high; i++) {
const buf_page_t* bpage = buf_page_hash_get(space, i);
......@@ -273,15 +269,13 @@ buf_read_ahead_random(
if (recent_blocks >= BUF_READ_AHEAD_RANDOM_THRESHOLD) {
- //buf_pool_mutex_exit();
- rw_lock_s_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
goto read_ahead;
}
}
}
- //buf_pool_mutex_exit();
- rw_lock_s_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
/* Do nothing */
return(0);
......@@ -475,12 +469,10 @@ buf_read_ahead_linear(
tablespace_version = fil_space_get_version(space);
- //buf_pool_mutex_enter();
- mutex_enter(&buf_pool_mutex);
+ buf_pool_mutex_enter();
if (high > fil_space_get_size(space)) {
- //buf_pool_mutex_exit();
- mutex_exit(&buf_pool_mutex);
+ buf_pool_mutex_exit();
/* The area is not whole, return */
return(0);
......@@ -488,12 +480,10 @@ buf_read_ahead_linear(
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
- //buf_pool_mutex_exit();
- mutex_exit(&buf_pool_mutex);
+ buf_pool_mutex_exit();
return(0);
}
- mutex_exit(&buf_pool_mutex);
/* Check that almost all pages in the area have been accessed; if
offset == low, the accesses must be in a descending order, otherwise,
......@@ -507,7 +497,6 @@ buf_read_ahead_linear(
fail_count = 0;
- rw_lock_s_lock(&page_hash_latch);
for (i = low; i < high; i++) {
bpage = buf_page_hash_get(space, i);
......@@ -531,8 +520,7 @@ buf_read_ahead_linear(
* LINEAR_AREA_THRESHOLD_COEF) {
/* Too many failures: return */
- //buf_pool_mutex_exit();
- rw_lock_s_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
return(0);
}
......@@ -543,8 +531,7 @@ buf_read_ahead_linear(
bpage = buf_page_hash_get(space, offset);
if (bpage == NULL) {
- //buf_pool_mutex_exit();
- rw_lock_s_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
return(0);
}
......@@ -570,8 +557,7 @@ buf_read_ahead_linear(
pred_offset = fil_page_get_prev(frame);
succ_offset = fil_page_get_next(frame);
- //buf_pool_mutex_exit();
- rw_lock_s_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
if ((offset == low) && (succ_offset == offset + 1)) {
......
......@@ -2282,8 +2282,7 @@ i_s_cmpmem_fill_low(
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
- //buf_pool_mutex_enter();
- mutex_enter(&zip_free_mutex);
+ buf_pool_mutex_enter();
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
buf_buddy_stat_t* buddy_stat = &buf_buddy_stat[x];
......@@ -2309,8 +2308,7 @@ i_s_cmpmem_fill_low(
}
}
- //buf_pool_mutex_exit();
- mutex_exit(&zip_free_mutex);
+ buf_pool_mutex_exit();
DBUG_RETURN(status);
}
......
......@@ -28,7 +28,6 @@ struct innodb_enhancement {
{"innodb_io","Improvements to InnoDB IO","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_opt_lru_count","Fix of buffer_pool mutex","Decreases contention on buffer_pool mutex on LRU operations","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_buffer_pool_pages","Information of buffer pool content","","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_split_buf_pool_mutex","More fix of buffer_pool mutex","Spliting buf_pool_mutex and optimizing based on innodb_opt_lru_count","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_expand_undo_slots","expandable maximum number of undo slots","from 1024 (default) to about 4000","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_extra_rseg","allow to create extra rollback segments","When create new db, the new parameter allows to create more rollback segments","http://www.percona.com/docs/wiki/percona-xtradb"},
{"innodb_overwrite_relay_log_info","overwrite relay-log.info when slave recovery","Building as plugin, it is not used.","http://www.percona.com/docs/wiki/percona-xtradb:innodb_overwrite_relay_log_info"},
......
......@@ -49,11 +49,10 @@ buf_buddy_alloc(
/* out: allocated block,
possibly NULL if lru == NULL */
ulint size, /* in: block size, up to UNIV_PAGE_SIZE */
- ibool* lru, /* in: pointer to a variable that will be assigned
+ ibool* lru) /* in: pointer to a variable that will be assigned
TRUE if storage was allocated from the LRU list
and buf_pool_mutex was temporarily released,
or NULL if the LRU list should not be used */
- ibool have_page_hash_mutex)
__attribute__((malloc));
/**************************************************************************
......@@ -64,8 +63,7 @@ buf_buddy_free(
/*===========*/
void* buf, /* in: block to be freed, must not be
pointed to by the buffer pool */
- ulint size, /* in: block size, up to UNIV_PAGE_SIZE */
- ibool have_page_hash_mutex)
+ ulint size) /* in: block size, up to UNIV_PAGE_SIZE */
__attribute__((nonnull));
/** Statistics of buddy blocks of a given size. */
......
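For callers of the buddy allocator the change is purely a signature cut. An illustrative call site (hypothetical, assuming a compressed page of zip_size bytes and an ibool lru flag) would change like this:

    /* Split scheme: the caller declares whether it already holds
       page_hash_latch. */
    buf = buf_buddy_alloc(zip_size, &lru, TRUE /* have_page_hash_mutex */);
    buf_buddy_free(buf, zip_size, TRUE);

    /* Restored scheme: buf_pool_mutex must simply be held, which the
       re-enabled ut_ad(buf_pool_mutex_own()) checks enforce. */
    buf = buf_buddy_alloc(zip_size, &lru);
    buf_buddy_free(buf, zip_size);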
......@@ -44,11 +44,10 @@ buf_buddy_alloc_low(
possibly NULL if lru==NULL */
ulint i, /* in: index of buf_pool->zip_free[],
or BUF_BUDDY_SIZES */
- ibool* lru, /* in: pointer to a variable that will be assigned
+ ibool* lru) /* in: pointer to a variable that will be assigned
TRUE if storage was allocated from the LRU list
and buf_pool_mutex was temporarily released,
or NULL if the LRU list should not be used */
- ibool have_page_hash_mutex)
__attribute__((malloc));
/**************************************************************************
......@@ -59,9 +58,8 @@ buf_buddy_free_low(
/*===============*/
void* buf, /* in: block to be freed, must not be
pointed to by the buffer pool */
- ulint i, /* in: index of buf_pool->zip_free[],
+ ulint i) /* in: index of buf_pool->zip_free[],
or BUF_BUDDY_SIZES */
- ibool have_page_hash_mutex)
__attribute__((nonnull));
/**************************************************************************
......@@ -100,15 +98,14 @@ buf_buddy_alloc(
/* out: allocated block,
possibly NULL if lru == NULL */
ulint size, /* in: block size, up to UNIV_PAGE_SIZE */
- ibool* lru, /* in: pointer to a variable that will be assigned
+ ibool* lru) /* in: pointer to a variable that will be assigned
TRUE if storage was allocated from the LRU list
and buf_pool_mutex was temporarily released,
or NULL if the LRU list should not be used */
- ibool have_page_hash_mutex)
{
- //ut_ad(buf_pool_mutex_own());
+ ut_ad(buf_pool_mutex_own());
- return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru, have_page_hash_mutex));
+ return(buf_buddy_alloc_low(buf_buddy_get_slot(size), lru));
}
/**************************************************************************
......@@ -119,26 +116,11 @@ buf_buddy_free(
/*===========*/
void* buf, /* in: block to be freed, must not be
pointed to by the buffer pool */
- ulint size, /* in: block size, up to UNIV_PAGE_SIZE */
- ibool have_page_hash_mutex)
+ ulint size) /* in: block size, up to UNIV_PAGE_SIZE */
{
- //ut_ad(buf_pool_mutex_own());
- if (!have_page_hash_mutex) {
- 	mutex_enter(&LRU_list_mutex);
- 	mutex_enter(&flush_list_mutex);
- 	rw_lock_x_lock(&page_hash_latch);
- }
- mutex_enter(&zip_free_mutex);
- buf_buddy_free_low(buf, buf_buddy_get_slot(size), TRUE);
- mutex_exit(&zip_free_mutex);
+ ut_ad(buf_pool_mutex_own());
- if (!have_page_hash_mutex) {
- 	mutex_exit(&LRU_list_mutex);
- 	mutex_exit(&flush_list_mutex);
- 	rw_lock_x_unlock(&page_hash_latch);
- }
+ buf_buddy_free_low(buf, buf_buddy_get_slot(size));
}
#ifdef UNIV_MATERIALIZE
......
......@@ -1061,10 +1061,10 @@ struct buf_page_struct{
UT_LIST_NODE_T(buf_page_t) LRU;
/* node of the LRU list */
- //#ifdef UNIV_DEBUG
+ #ifdef UNIV_DEBUG
ibool in_LRU_list; /* TRUE if the page is in the LRU list;
used in debugging */
- //#endif /* UNIV_DEBUG */
+ #endif /* UNIV_DEBUG */
unsigned old:1; /* TRUE if the block is in the old
blocks in the LRU list */
unsigned LRU_position:31;/* value which monotonically decreases
......@@ -1104,11 +1104,11 @@ struct buf_block_struct{
a block is in the unzip_LRU list
if page.state == BUF_BLOCK_FILE_PAGE
and page.zip.data != NULL */
- //#ifdef UNIV_DEBUG
+ #ifdef UNIV_DEBUG
ibool in_unzip_LRU_list;/* TRUE if the page is in the
decompressed LRU list;
used in debugging */
- //#endif /* UNIV_DEBUG */
+ #endif /* UNIV_DEBUG */
byte* frame; /* pointer to buffer frame which
is of size UNIV_PAGE_SIZE, and
aligned to an address divisible by
......@@ -1316,12 +1316,6 @@ struct buf_pool_struct{
/* mutex protecting the buffer pool struct and control blocks, except the
read-write lock in them */
extern mutex_t buf_pool_mutex;
- extern mutex_t LRU_list_mutex;
- extern mutex_t flush_list_mutex;
- extern rw_lock_t page_hash_latch;
- extern mutex_t free_list_mutex;
- extern mutex_t zip_free_mutex;
- extern mutex_t zip_hash_mutex;
/* mutex protecting the control blocks of compressed-only pages
(of type buf_page_t, not buf_block_t) */
extern mutex_t buf_pool_zip_mutex;
......
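The six extern declarations removed above are the heart of the patch being disabled. Judging from their names and from the call sites elsewhere in this commit (not from an authoritative specification), they partitioned buf_pool roughly as follows:

    /* LRU_list_mutex   - buf_pool->LRU and the unzip_LRU list         */
    /* flush_list_mutex - buf_pool->flush_list                         */
    /* page_hash_latch  - buf_pool->page_hash; an rw-lock taken shared */
    /*                    for lookups, exclusive for insert/remove     */
    /* free_list_mutex  - buf_pool->free                               */
    /* zip_free_mutex   - buf_pool->zip_free[] buddy-allocator lists   */
    /* zip_hash_mutex   - buf_pool->zip_hash                           */

With the split disabled, buf_pool_mutex alone protects all of these structures again, matching the comment above the surviving extern.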
......@@ -100,8 +100,7 @@ buf_pool_get_oldest_modification(void)
buf_page_t* bpage;
ib_uint64_t lsn;
- //buf_pool_mutex_enter();
- mutex_enter(&flush_list_mutex);
+ buf_pool_mutex_enter();
bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
......@@ -112,8 +111,7 @@ buf_pool_get_oldest_modification(void)
lsn = bpage->oldest_modification;
}
- //buf_pool_mutex_exit();
- mutex_exit(&flush_list_mutex);
+ buf_pool_mutex_exit();
/* The returned answer may be out of date: the flush_list can
change after the mutex has been released. */
......@@ -130,8 +128,7 @@ buf_pool_clock_tic(void)
/*====================*/
/* out: new clock value */
{
- //ut_ad(buf_pool_mutex_own());
- ut_ad(mutex_own(&LRU_list_mutex));
+ ut_ad(buf_pool_mutex_own());
buf_pool->ulint_clock++;
......@@ -249,7 +246,7 @@ buf_page_in_file(
case BUF_BLOCK_ZIP_FREE:
/* This is a free page in buf_pool->zip_free[].
Such pages should only be accessed by the buddy allocator. */
- /* ut_error; */ /* optimistic */
+ ut_error;
break;
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
......@@ -291,7 +288,7 @@ buf_page_get_LRU_position(
const buf_page_t* bpage) /* in: control block */
{
ut_ad(buf_page_in_file(bpage));
- //ut_ad(buf_pool_mutex_own()); /* This is used in optimistic */
+ ut_ad(buf_pool_mutex_own());
return(bpage->LRU_position);
}
......@@ -308,7 +305,7 @@ buf_page_get_mutex(
{
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_FREE:
- /* ut_error; */ /* optimistic */
+ ut_error;
return(NULL);
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
......@@ -413,7 +410,7 @@ buf_page_set_io_fix(
buf_page_t* bpage, /* in/out: control block */
enum buf_io_fix io_fix) /* in: io_fix state */
{
- //ut_ad(buf_pool_mutex_own());
+ ut_ad(buf_pool_mutex_own());
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
bpage->io_fix = io_fix;
......@@ -441,13 +438,12 @@ buf_page_can_relocate(
/*==================*/
const buf_page_t* bpage) /* control block being relocated */
{
- //ut_ad(buf_pool_mutex_own());
- /* optimistic */
- //ut_ad(mutex_own(buf_page_get_mutex(bpage)));
- //ut_ad(buf_page_in_file(bpage));
- //ut_ad(bpage->in_LRU_list);
+ ut_ad(buf_pool_mutex_own());
+ ut_ad(mutex_own(buf_page_get_mutex(bpage)));
+ ut_ad(buf_page_in_file(bpage));
+ ut_ad(bpage->in_LRU_list);
- return(bpage->in_LRU_list && bpage->io_fix == BUF_IO_NONE
+ return(buf_page_get_io_fix(bpage) == BUF_IO_NONE
&& bpage->buf_fix_count == 0);
}
......@@ -461,7 +457,7 @@ buf_page_is_old(
const buf_page_t* bpage) /* in: control block */
{
ut_ad(buf_page_in_file(bpage));
- //ut_ad(buf_pool_mutex_own()); /* This is used in optimistic */
+ ut_ad(buf_pool_mutex_own());
return(bpage->old);
}
......@@ -476,8 +472,7 @@ buf_page_set_old(
ibool old) /* in: old */
{
ut_a(buf_page_in_file(bpage));
- //ut_ad(buf_pool_mutex_own());
- ut_ad(mutex_own(&LRU_list_mutex));
+ ut_ad(buf_pool_mutex_own());
ut_ad(bpage->in_LRU_list);
#ifdef UNIV_LRU_DEBUG
......@@ -733,17 +728,17 @@ buf_block_free(
/*===========*/
buf_block_t* block) /* in, own: block to be freed */
{
- //buf_pool_mutex_enter();
+ buf_pool_mutex_enter();
mutex_enter(&block->mutex);
ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
- buf_LRU_block_free_non_file_page(block, FALSE);
+ buf_LRU_block_free_non_file_page(block);
mutex_exit(&block->mutex);
- //buf_pool_mutex_exit();
+ buf_pool_mutex_exit();
}
/*************************************************************************
......@@ -788,17 +783,14 @@ buf_page_io_query(
buf_page_t* bpage) /* in: buf_pool block, must be bufferfixed */
{
ibool io_fixed;
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
- //buf_pool_mutex_enter();
- mutex_enter(block_mutex);
+ buf_pool_mutex_enter();
ut_ad(buf_page_in_file(bpage));
ut_ad(bpage->buf_fix_count > 0);
io_fixed = buf_page_get_io_fix(bpage) != BUF_IO_NONE;
- //buf_pool_mutex_exit();
- mutex_exit(block_mutex);
+ buf_pool_mutex_exit();
return(io_fixed);
}
......@@ -925,11 +917,7 @@ buf_page_hash_get(
ulint fold;
ut_ad(buf_pool);
- //ut_ad(buf_pool_mutex_own());
- #ifdef UNIV_SYNC_DEBUG
- ut_ad(rw_lock_own(&page_hash_latch, RW_LOCK_EX)
-       || rw_lock_own(&page_hash_latch, RW_LOCK_SHARED));
- #endif
+ ut_ad(buf_pool_mutex_own());
/* Look for the page in the hash table */
......@@ -978,13 +966,11 @@ buf_page_peek(
{
const buf_page_t* bpage;
- //buf_pool_mutex_enter();
- rw_lock_s_lock(&page_hash_latch);
+ buf_pool_mutex_enter();
bpage = buf_page_hash_get(space, offset);
- //buf_pool_mutex_exit();
- rw_lock_s_unlock(&page_hash_latch);
+ buf_pool_mutex_exit();
return(bpage != NULL);
}
......@@ -1047,17 +1033,12 @@ buf_page_release(
ut_a(block->page.buf_fix_count > 0);
if (rw_latch == RW_X_LATCH && mtr->modifications) {
- //buf_pool_mutex_enter();
- mutex_enter(&flush_list_mutex);
- mutex_enter(&block->mutex);
- ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
+ buf_pool_mutex_enter();
buf_flush_note_modification(block, mtr);
- //buf_pool_mutex_exit();
- mutex_exit(&flush_list_mutex);
+ buf_pool_mutex_exit();
}
- else {
- 	mutex_enter(&block->mutex);
- }
+ mutex_enter(&block->mutex);
#ifdef UNIV_SYNC_DEBUG
rw_lock_s_unlock(&(block->debug_latch));
......
......@@ -59,8 +59,7 @@ buf_flush_note_modification(
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
- //ut_ad(buf_pool_mutex_own());
- ut_ad(mutex_own(&flush_list_mutex));
+ ut_ad(buf_pool_mutex_own());
ut_ad(mtr->start_lsn != 0);
ut_ad(mtr->modifications);
......@@ -100,8 +99,7 @@ buf_flush_recv_note_modification(
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
- //buf_pool_mutex_enter();
- mutex_enter(&flush_list_mutex);
+ buf_pool_mutex_enter();
ut_ad(block->page.newest_modification <= end_lsn);
......@@ -118,6 +116,5 @@ buf_flush_recv_note_modification(
ut_ad(block->page.oldest_modification <= start_lsn);
}
- //buf_pool_mutex_exit();
- mutex_exit(&flush_list_mutex);
+ buf_pool_mutex_exit();
}
......@@ -122,11 +122,10 @@ buf_LRU_free_block(
buf_page_t* bpage, /* in: block to be freed */
ibool zip, /* in: TRUE if should remove also the
compressed page of an uncompressed page */
- ibool* buf_pool_mutex_released,
+ ibool* buf_pool_mutex_released);
/* in: pointer to a variable that will
be assigned TRUE if buf_pool_mutex
was temporarily released, or NULL */
- ibool have_LRU_mutex);
/**********************************************************************
Try to free a replaceable block. */
UNIV_INTERN
......@@ -170,8 +169,7 @@ UNIV_INTERN
void
buf_LRU_block_free_non_file_page(
/*=============================*/
- buf_block_t* block, /* in: block, must not contain a file page */
- ibool have_page_hash_mutex);
+ buf_block_t* block); /* in: block, must not contain a file page */
/**********************************************************************
Adds a block to the LRU list. */
UNIV_INTERN
......
......@@ -35,7 +35,7 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 0
#define INNODB_VERSION_BUGFIX 3
- #define PERCONA_INNODB_VERSION 5
+ #define PERCONA_INNODB_VERSION 5a
/* The following is the InnoDB version as shown in
SELECT plugin_version FROM information_schema.plugins;
......