Commit 67a0d9c0 authored by Marko Mäkelä

Fix bit-rot left by the multiple buffer pools patch.

Rename buf_pool_watch, buf_pool_mutex, buf_pool_zip_mutex
to buf_pool->watch, buf_pool->mutex, buf_pool->zip_mutex
in comments. Refer to buf_pool->flush_list_mutex instead of
flush_list_mutex.

Remove obsolete declarations of buf_pool_mutex and buf_pool_zip_mutex.
parent a2634cf7
@@ -281,7 +281,7 @@ buf_buddy_alloc_from(
/**********************************************************************//**
Allocate a block. The thread calling this function must hold
-buf_pool->mutex and must not hold buf_pool_zip_mutex or any block->mutex.
+buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex.
The buf_pool->mutex may only be released and reacquired if lru != NULL.
@return allocated block, possibly NULL if lru==NULL */
UNIV_INTERN
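For illustration only (not part of this commit): a caller obeying the contract above might be shaped roughly as follows, assuming the three-argument buf_buddy_alloc(buf_pool, size, &lru) form declared in the buf0buddy headers further down in this diff, and a buf_pool_mutex_exit() counterpart to the accessor macro shown there.

/* Sketch only: hold buf_pool->mutex, never buf_pool->zip_mutex, across
the buddy allocation.  Because lru is non-NULL, buf_pool->mutex may be
released and reacquired inside the call. */
static void*
zip_frame_alloc_sketch(buf_pool_t* buf_pool, ulint zip_size)
{
	ibool	lru	= FALSE;
	void*	frame;

	ut_ad(!mutex_own(&buf_pool->zip_mutex));

	buf_pool_mutex_enter(buf_pool);
	frame = buf_buddy_alloc(buf_pool, zip_size, &lru);	/* assumed signature */
	buf_pool_mutex_exit(buf_pool);				/* assumed counterpart macro */

	return(frame);
}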
@@ -172,7 +172,7 @@ The chain of modified blocks (buf_pool->flush_list) contains the blocks
holding file pages that have been modified in the memory
but not written to disk yet. The block with the oldest modification
which has not yet been written to disk is at the end of the chain.
-The access to this list is protected by flush_list_mutex.
+The access to this list is protected by buf_pool->flush_list_mutex.
The chain of unmodified compressed blocks (buf_pool->zip_clean)
contains the control blocks (buf_page_t) of those compressed pages
@@ -1882,8 +1882,8 @@ buf_pool_watch_set(
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->buf_fix_count == 0);
-/* bpage is pointing to buf_pool_watch[],
-which is protected by buf_pool_mutex.
+/* bpage is pointing to buf_pool->watch[],
+which is protected by buf_pool->mutex.
Normally, buf_page_t objects are protected by
buf_block_t::mutex or buf_pool->zip_mutex or both. */
@@ -321,7 +321,7 @@ buf_flush_insert_sorted_into_flush_list(
buf_flush_list_mutex_enter(buf_pool);
-/* The field in_LRU_list is protected by buf_pool_mutex, which
+/* The field in_LRU_list is protected by buf_pool->mutex, which
we are not holding. However, while a block is in the flush
list, it is dirty and cannot be discarded, not from the
page_hash or from the LRU list. At most, the uncompressed
@@ -1061,7 +1061,7 @@ buf_flush_write_block_low(
ut_ad(buf_page_in_file(bpage));
-/* We are not holding buf_pool_mutex or block_mutex here.
+/* We are not holding buf_pool->mutex or block_mutex here.
Nevertheless, it is safe to access bpage, because it is
io_fixed and oldest_modification != 0. Thus, it cannot be
relocated in the buffer pool or removed from flush_list or
@@ -1135,7 +1135,7 @@ buf_flush_write_block_low(
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
-NOTE: buf_pool_mutex and block->mutex must be held upon entering this
+NOTE: buf_pool->mutex and block->mutex must be held upon entering this
function, and they will be released by this function after flushing.
This is loosely based on buf_flush_batch() and buf_flush_page().
@return TRUE if the page was flushed and the mutexes released */
@@ -2193,12 +2193,12 @@ buf_flush_validate_low(
ut_ad(bpage->in_flush_list);
-/* A page in flush_list can be in BUF_BLOCK_REMOVE_HASH
-state. This happens when a page is in the middle of
-being relocated. In that case the original descriptor
-can have this state and still be in the flush list
-waiting to acquire the flush_list_mutex to complete
-the relocation. */
+/* A page in buf_pool->flush_list can be in
+BUF_BLOCK_REMOVE_HASH state. This happens when a page
+is in the middle of being relocated. In that case the
+original descriptor can have this state and still be
+in the flush list waiting to acquire the
+buf_pool->flush_list_mutex to complete the relocation. */
ut_a(buf_page_in_file(bpage)
|| buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
ut_a(om > 0);
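The buf_pool->flush_list_mutex protocol that these hunks keep referring to can be pictured with a minimal traversal sketch (not part of this commit); buf_flush_list_mutex_exit() is assumed to be the counterpart of the buf_flush_list_mutex_enter() call visible above.

/* Sketch only: walk buf_pool->flush_list from its end, where the block
with the oldest modification lives, while holding
buf_pool->flush_list_mutex. */
static void
flush_list_walk_sketch(buf_pool_t* buf_pool)
{
	buf_page_t*	bpage;

	buf_flush_list_mutex_enter(buf_pool);

	for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
	     bpage != NULL;
	     bpage = UT_LIST_GET_PREV(list, bpage)) {

		ut_ad(bpage->in_flush_list);
		/* BUF_BLOCK_REMOVE_HASH is legal here during a
		relocation; see buf_flush_validate_low() above. */
	}

	buf_flush_list_mutex_exit(buf_pool);	/* assumed counterpart */
}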
@@ -356,8 +356,8 @@ buf_LRU_invalidate_tablespace_buf_pool_instance(
prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
/* bpage->space and bpage->io_fix are protected by
-buf_pool_mutex and block_mutex. It is safe to check
-them while holding buf_pool_mutex only. */
+buf_pool->mutex and block_mutex. It is safe to check
+them while holding buf_pool->mutex only. */
if (buf_page_get_space(bpage) != id) {
/* Skip this block, as it does not belong to
@@ -403,7 +403,7 @@ buf_LRU_invalidate_tablespace_buf_pool_instance(
/* Descriptors of uncompressed
blocks will not be relocated,
because we are holding the
-buf_pool_mutex. */
+buf_pool->mutex. */
break;
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
@@ -1443,10 +1443,10 @@ Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: If this function returns BUF_LRU_FREED, it will temporarily
-release buf_pool_mutex. Furthermore, the page frame will no longer be
+release buf_pool->mutex. Furthermore, the page frame will no longer be
accessible via bpage.
-The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and
+The caller must hold buf_pool->mutex and buf_page_get_mutex(bpage) and
release these two mutexes after the call. No other
buf_page_get_mutex() may be held when calling this function.
@return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or
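The calling convention documented above (hold buf_pool->mutex plus buf_page_get_mutex(bpage), and be prepared for buf_pool->mutex to have been temporarily released when BUF_LRU_FREED is returned) amounts to roughly the following caller shape. This is a sketch only; the actual call is elided because its full parameter list is not shown in this diff, and buf_pool_mutex_exit() is assumed as the counterpart macro.

/* Sketch only: the mutexes the caller of the block-free routine must
hold, and release again after the call. */
static void
free_block_caller_sketch(buf_pool_t* buf_pool, buf_page_t* bpage)
{
	mutex_t*	block_mutex = buf_page_get_mutex(bpage);

	buf_pool_mutex_enter(buf_pool);
	mutex_enter(block_mutex);

	/* ... invoke the free routine here; if it returns BUF_LRU_FREED,
	it temporarily released buf_pool->mutex, and the page frame must
	no longer be accessed via bpage ... */

	mutex_exit(block_mutex);
	buf_pool_mutex_exit(buf_pool);		/* assumed counterpart macro */
}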
@@ -36,7 +36,7 @@ Created December 2006 by Marko Makela
/**********************************************************************//**
Allocate a block. The thread calling this function must hold
-buf_pool->mutex and must not hold buf_pool_zip_mutex or any
+buf_pool->mutex and must not hold buf_pool->zip_mutex or any
block->mutex. The buf_pool->mutex may only be released and reacquired
if lru != NULL. This function should only be used for allocating
compressed page frames or control blocks (buf_page_t). Allocated
@@ -35,7 +35,7 @@ Created December 2006 by Marko Makela
/**********************************************************************//**
Allocate a block. The thread calling this function must hold
-buf_pool->mutex and must not hold buf_pool_zip_mutex or any block->mutex.
+buf_pool->mutex and must not hold buf_pool->zip_mutex or any block->mutex.
The buf_pool->mutex may only be released and reacquired if lru != NULL.
@return allocated block, possibly NULL if lru==NULL */
UNIV_INTERN
@@ -86,7 +86,7 @@ buf_buddy_get_slot(
/**********************************************************************//**
Allocate a block. The thread calling this function must hold
-buf_pool->mutex and must not hold buf_pool_zip_mutex or any
+buf_pool->mutex and must not hold buf_pool->zip_mutex or any
block->mutex. The buf_pool->mutex may only be released and reacquired
if lru != NULL. This function should only be used for allocating
compressed page frames or control blocks (buf_page_t). Allocated
@@ -96,7 +96,7 @@ enum buf_page_state {
BUF_BLOCK_ZIP_FREE = 0, /*!< contains a free
compressed page */
BUF_BLOCK_POOL_WATCH = 0, /*!< a sentinel for the buffer pool
-watch, element of buf_pool_watch[] */
+watch, element of buf_pool->watch[] */
BUF_BLOCK_ZIP_PAGE, /*!< contains a clean
compressed page */
BUF_BLOCK_ZIP_DIRTY, /*!< contains a compressed
@@ -1210,10 +1210,10 @@ struct buf_page_struct{
#endif /* !UNIV_HOTBACKUP */
page_zip_des_t zip; /*!< compressed page; zip.data
(but not the data it points to) is
-also protected by buf_pool_mutex;
+also protected by buf_pool->mutex;
state == BUF_BLOCK_ZIP_PAGE and
zip.data == NULL means an active
-buf_pool_watch */
+buf_pool->watch */
#ifndef UNIV_HOTBACKUP
buf_page_t* hash; /*!< node used in chaining to
buf_pool->page_hash or
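The buf_pool->watch encoding described in the zip field comment above (state == BUF_BLOCK_ZIP_PAGE together with zip.data == NULL) could be tested with a small predicate like the following sketch; the helper name is hypothetical and not part of this commit.

/* Sketch only: recognize an active buf_pool->watch sentinel. */
static ibool
buf_page_is_active_watch_sketch(const buf_page_t* bpage)
{
	return(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE
	       && bpage->zip.data == NULL);
}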
@@ -1224,15 +1224,16 @@ struct buf_page_struct{
#endif /* UNIV_DEBUG */
/** @name Page flushing fields
-All these are protected by buf_pool_mutex. */
+All these are protected by buf_pool->mutex. */
/* @{ */
UT_LIST_NODE_T(buf_page_t) list;
/*!< based on state, this is a
list node, protected either by
-buf_pool_mutex or by
-flush_list_mutex, in one of the
-following lists in buf_pool:
+buf_pool->mutex or by
+buf_pool->flush_list_mutex,
+in one of the following lists in
+buf_pool:
- BUF_BLOCK_NOT_USED: free
- BUF_BLOCK_FILE_PAGE: flush_list
@@ -1242,9 +1243,9 @@ struct buf_page_struct{
If bpage is part of flush_list
then the node pointers are
-covered by flush_list_mutex.
+covered by buf_pool->flush_list_mutex.
Otherwise these pointers are
-protected by buf_pool_mutex.
+protected by buf_pool->mutex.
The contents of the list node
is undefined if !in_flush_list
@@ -1256,17 +1257,18 @@ struct buf_page_struct{
#ifdef UNIV_DEBUG
ibool in_flush_list; /*!< TRUE if in buf_pool->flush_list;
-when flush_list_mutex is free, the
-following should hold: in_flush_list
+when buf_pool->flush_list_mutex is
+free, the following should hold:
+in_flush_list
== (state == BUF_BLOCK_FILE_PAGE
|| state == BUF_BLOCK_ZIP_DIRTY)
Writes to this field must be
covered by both block->mutex
-and flush_list_mutex. Hence
+and buf_pool->flush_list_mutex. Hence
reads can happen while holding
any one of the two mutexes */
ibool in_free_list; /*!< TRUE if in buf_pool->free; when
-buf_pool_mutex is free, the following
+buf_pool->mutex is free, the following
should hold: in_free_list
== (state == BUF_BLOCK_NOT_USED) */
#endif /* UNIV_DEBUG */
@@ -1286,7 +1288,7 @@ struct buf_page_struct{
modifications are on disk.
Writes to this field must be
covered by both block->mutex
-and flush_list_mutex. Hence
+and buf_pool->flush_list_mutex. Hence
reads can happen while holding
any one of the two mutexes */
/* @} */
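The double-mutex write rule stated above (and earlier for in_flush_list) boils down to the pattern below, shown here for the modification-LSN field that this diff refers to elsewhere as oldest_modification. This is a sketch, not code from this commit; buf_flush_list_mutex_exit() and the ib_uint64_t field type are assumptions, and buf_page_get_mutex(bpage) stands in for block->mutex.

/* Sketch only: write while holding both block->mutex and
buf_pool->flush_list_mutex, so a reader holding either one of the two
mutexes sees a consistent value. */
static void
set_oldest_modification_sketch(
	buf_pool_t*	buf_pool,
	buf_page_t*	bpage,
	ib_uint64_t	lsn)			/* assumed field type */
{
	mutex_t*	block_mutex = buf_page_get_mutex(bpage);

	mutex_enter(block_mutex);
	buf_flush_list_mutex_enter(buf_pool);

	bpage->oldest_modification = lsn;

	buf_flush_list_mutex_exit(buf_pool);	/* assumed counterpart */
	mutex_exit(block_mutex);
}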
@@ -1661,20 +1663,13 @@ struct buf_pool_struct{
/* @} */
};
-/** mutex protecting the buffer pool struct and control blocks, except the
-read-write lock in them */
-extern mutex_t buf_pool_mutex;
-/** mutex protecting the control blocks of compressed-only pages
-(of type buf_page_t, not buf_block_t) */
-extern mutex_t buf_pool_zip_mutex;
-/** @name Accessors for buf_pool_mutex.
-Use these instead of accessing buf_pool_mutex directly. */
+/** @name Accessors for buf_pool->mutex.
+Use these instead of accessing buf_pool->mutex directly. */
/* @{ */
-/** Test if buf_pool_mutex is owned. */
+/** Test if a buffer pool mutex is owned. */
#define buf_pool_mutex_own(b) mutex_own(&b->mutex)
-/** Acquire the buffer pool mutex. */
+/** Acquire a buffer pool mutex. */
#define buf_pool_mutex_enter(b) do { \
ut_ad(!mutex_own(&b->zip_mutex)); \
mutex_enter(&b->mutex); \
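The per-instance accessors above take over from the global buf_pool_mutex and buf_pool_zip_mutex declarations removed in this hunk; a typical critical section would be shaped like this sketch, with buf_pool_mutex_exit() assumed as the counterpart of buf_pool_mutex_enter().

/* Sketch only: each buffer pool instance is now locked individually. */
static void
buf_pool_critical_section_sketch(buf_pool_t* buf_pool)
{
	buf_pool_mutex_enter(buf_pool);
	ut_ad(buf_pool_mutex_own(buf_pool));

	/* ... operate on this instance's lists and control blocks ... */

	buf_pool_mutex_exit(buf_pool);	/* assumed counterpart macro */
}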
@@ -87,7 +87,7 @@ buf_flush_init_for_writing(
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
-NOTE: buf_pool_mutex and block->mutex must be held upon entering this
+NOTE: buf_pool->mutex and block->mutex must be held upon entering this
function, and they will be released by this function after flushing.
This is loosely based on buf_flush_batch() and buf_flush_page().
@return TRUE if the page was flushed and the mutexes released */
@@ -1207,8 +1207,8 @@ sync_thread_add_level(
case SYNC_BUF_BLOCK:
/* Either the thread must own the buffer pool mutex
-(buf_pool_mutex), or it is allowed to latch only ONE
-buffer block (block->mutex or buf_pool_zip_mutex). */
+(buf_pool->mutex), or it is allowed to latch only ONE
+buffer block (block->mutex or buf_pool->zip_mutex). */
if (!sync_thread_levels_g(array, level, FALSE)) {
ut_a(sync_thread_levels_g(array, level - 1, TRUE));
ut_a(sync_thread_levels_contain(array, SYNC_BUF_POOL));
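The SYNC_BUF_BLOCK rule in the comment above permits exactly two latching patterns, sketched below for buf_block_t mutexes. This is illustrative only, not part of this commit, and buf_pool_mutex_exit() is assumed as the counterpart macro.

/* Sketch only, case (a): while holding buf_pool->mutex, more than one
block->mutex may be latched at a time. */
static void
latch_two_blocks_sketch(buf_pool_t* buf_pool,
			buf_block_t* block1, buf_block_t* block2)
{
	buf_pool_mutex_enter(buf_pool);
	mutex_enter(&block1->mutex);
	mutex_enter(&block2->mutex);

	/* ... */

	mutex_exit(&block2->mutex);
	mutex_exit(&block1->mutex);
	buf_pool_mutex_exit(buf_pool);	/* assumed counterpart macro */
}

/* Sketch only, case (b): without buf_pool->mutex, at most ONE buffer
block mutex (or buf_pool->zip_mutex) may be held. */
static void
latch_one_block_sketch(buf_block_t* block)
{
	mutex_enter(&block->mutex);

	/* ... touch only this one block ... */

	mutex_exit(&block->mutex);
}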