Commit d6aed216 authored by Marko Mäkelä

MDEV-30216 Read-ahead unnecessarily allocates and frees pages when a page is in the buffer pool

buf_pool_t::page_hash_contains(): Check if a page is cached.

buf_read_ahead_random(), buf_read_page_background(),
buf_read_ahead_linear(): Before invoking buf_read_page_low(),
preallocate a buffer page for the read request.

buf_read_page(), buf_page_init_for_read(), buf_read_page_low():
Add a parameter for the buf_pool.page_hash chain, to avoid duplicated
computations.

buf_page_t::read_complete(): Only attempt recovery if an uncompressed
page frame has been allocated.

buf_page_init_for_read(): Before trying to acquire buf_pool.mutex, acquire
an exclusive buf_pool.page_hash latch and check if the page is already
located in the buffer pool. If the buf_pool.mutex is not immediately
available, release both latches and acquire them in the correct order,
and then recheck if the page is already in the buffer pool. This should
hopefully reduce some contention on buf_pool.mutex.
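
The lock-ordering dance can be sketched with standard C++ primitives. This is a simplified standalone stand-in, not the actual InnoDB code: pool_mutex, hash_latch and page_is_cached() are illustrative names for buf_pool.mutex, the exclusive page_hash cell latch and the buf_pool.page_hash.get() lookup. Note that std::mutex::try_lock() returns true on success, whereas mysql_mutex_trylock() returns nonzero on failure.

    #include <mutex>

    static std::mutex pool_mutex;   // stand-in for buf_pool.mutex
    static std::mutex hash_latch;   // stand-in for the page_hash cell latch

    static bool page_is_cached() { return false; }  // stand-in for page_hash.get()

    // Returns true with both latches held (the caller inserts the page and
    // then unlocks); returns false with neither held if the page is cached.
    static bool lock_for_insert()
    {
      hash_latch.lock();
      if (page_is_cached())
      {
        hash_latch.unlock();          // fast path: buf_pool.mutex never touched
        return false;
      }
      if (!pool_mutex.try_lock())     // blocking here would invert the order
      {
        hash_latch.unlock();          // back off ...
        pool_mutex.lock();            // ... and relock in the canonical order:
        hash_latch.lock();            // mutex first, then hash latch
        if (page_is_cached())         // recheck: a reader may have won the race
        {
          pool_mutex.unlock();
          hash_latch.unlock();
          return false;
        }
      }
      return true;
    }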

buf_page_init_for_read(), buf_read_page_low(): Encode the "recovery needed"
flag in the least significant bit of zip_size.
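
Bit 0 is free to carry the flag because every valid ROW_FORMAT=COMPRESSED page size is a power of two between 1024 and 16384 bytes, and 0 denotes an uncompressed page. A minimal standalone illustration of the encoding (not code from the patch itself):

    #include <cassert>

    typedef unsigned long ulint;

    int main()
    {
      ulint zip_size = 8192;            // a ROW_FORMAT=COMPRESSED page size
      ulint encoded  = zip_size | 1;    // what buf_read_recv_pages() passes down

      assert(encoded & 1);              // "recovery needed" flag is set
      assert((encoded & ~1UL) == 8192); // original page size recovered

      ulint plain = 0 | 1;              // uncompressed page during recovery
      assert((plain & ~1UL) == 0);      // zip_size 0 survives the round trip
      return 0;
    }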

buf_read_acquire(), buf_read_release(): Interface for allocating and
freeing buffer pages for reading.
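
Condensed from the buf_read_ahead_random() hunk in the diff below, this is the usage pattern the preallocation and the acquire/release interface combine into: one page is preallocated before the loop, a successful buf_read_page_low() consumes it (resetting block to nullptr), it is replenished for the next iteration, and any leftover is freed once at the end. The surrounding declarations (low, high, space, count) come from buf_read_ahead_random().

    buf_block_t *block= zip_size ? nullptr : buf_read_acquire();

    for (page_id_t i= low; i < high; ++i)
    {
      buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(i.fold());
      dberr_t err;
      space->reacquire();
      if (buf_read_page_low(i, zip_size, chain, &err, space, block))
      {
        count++;
        ut_ad(!block);            // the read consumed the preallocated page
        if (!zip_size)
          block= buf_read_acquire();
      }
    }

    buf_read_release(block);      // free the unused leftover, if any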

buf_read_recv_pages(): Set the flag that recovery is needed.
Other ROW_FORMAT=COMPRESSED reads during recovery
will not need any recovery.
parent f8ca355e
@@ -2095,7 +2095,7 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
     return bpage;
 
 must_read_page:
-  if (dberr_t err= buf_read_page(page_id, zip_size))
+  if (dberr_t err= buf_read_page(page_id, zip_size, chain))
   {
     ib::error() << "Reading compressed page " << page_id
                 << " failed with error: " << err;
@@ -2319,7 +2319,7 @@ buf_page_get_low(
 		corrupted, or if an encrypted page with a valid
 		checksum cannot be decrypted. */
-		if (dberr_t local_err = buf_read_page(page_id, zip_size)) {
+		if (dberr_t local_err = buf_read_page(page_id, zip_size, chain)) {
 			if (local_err != DB_CORRUPTION
 			    && mode != BUF_GET_POSSIBLY_FREED
 			    && retries++ < BUF_PAGE_READ_MAX_RETRIES) {
@@ -3235,18 +3235,16 @@ dberr_t buf_page_t::read_complete(const fil_node_t &node)
           << FORCE_RECOVERY_MSG;
     }
 
-    if (!srv_force_recovery)
-      goto release_page;
-  }
-
-  if (err == DB_PAGE_CORRUPTED || err == DB_DECRYPTION_FAILED)
+    if (err == DB_PAGE_CORRUPTED || err == DB_DECRYPTION_FAILED ||
+        !srv_force_recovery)
     {
 release_page:
       buf_pool.corrupted_evict(this, buf_page_t::READ_FIX);
       return err;
     }
+  }
 
-  const bool recovery= recv_recovery_is_on();
+  const bool recovery= frame && recv_recovery_is_on();
   if (recovery && !recv_recover_page(node.space, this))
     return DB_PAGE_CORRUPTED;
...
@@ -43,6 +43,14 @@ Created 11/5/1995 Heikki Tuuri
 #include "srv0srv.h"
 #include "log.h"
 
+TRANSACTIONAL_TARGET
+bool buf_pool_t::page_hash_contains(const page_id_t page_id, hash_chain &chain)
+{
+  transactional_shared_lock_guard<page_hash_latch> g
+    {page_hash.lock_get(chain)};
+  return page_hash.get(page_id, chain);
+}
+
 /** If there are buf_pool.curr_size per the number below pending reads, then
 read-ahead is not done: this is to prevent flooding the buffer pool with
 i/o-fixed buffer blocks */
@@ -55,54 +63,69 @@ then this function does nothing.
 Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock
 on the buffer frame. The io-handler must take care that the flag is cleared
 and the lock released later.
-@param[in]	page_id		page id
-@param[in]	zip_size	ROW_FORMAT=COMPRESSED page size, or 0
+@param page_id   page identifier
+@param zip_size  ROW_FORMAT=COMPRESSED page size, or 0,
+                 bitwise-ORed with 1 in recovery
+@param chain     buf_pool.page_hash cell for page_id
+@param block     preallocated buffer block (set to nullptr if consumed)
 @return pointer to the block
-@retval NULL in case of an error */
+@retval nullptr in case of an error */
 TRANSACTIONAL_TARGET
-static buf_page_t* buf_page_init_for_read(const page_id_t page_id,
-                                          ulint zip_size)
+static buf_page_t *buf_page_init_for_read(const page_id_t page_id,
+                                          ulint zip_size,
+                                          buf_pool_t::hash_chain &chain,
+                                          buf_block_t *&block)
 {
   buf_page_t *bpage= nullptr;
-  buf_block_t *block= nullptr;
-  if (!zip_size || recv_recovery_is_on())
+  if (!zip_size || (zip_size & 1))
   {
-    block= buf_LRU_get_free_block(false);
-    block->initialise(page_id, zip_size, buf_page_t::READ_FIX);
+    bpage= &block->page;
+    block->initialise(page_id, zip_size & ~1, buf_page_t::READ_FIX);
     /* x_unlock() will be invoked
     in buf_page_t::read_complete() by the io-handler thread. */
     block->page.lock.x_lock(true);
   }
 
-  buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
-  mysql_mutex_lock(&buf_pool.mutex);
+  page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain);
+  hash_lock.lock();
 
   if (buf_pool.page_hash.get(page_id, chain))
   {
+page_exists:
+    hash_lock.unlock();
     /* The page is already in the buffer pool. */
-    if (block)
+    if (bpage)
     {
-      block->page.lock.x_unlock(true);
-      ut_d(block->page.set_state(buf_page_t::MEMORY));
-      buf_LRU_block_free_non_file_page(block);
+      bpage->lock.x_unlock(true);
+      ut_d(mysql_mutex_lock(&buf_pool.mutex));
+      ut_d(bpage->set_state(buf_page_t::MEMORY));
+      ut_d(mysql_mutex_unlock(&buf_pool.mutex));
     }
-    goto func_exit;
+    return nullptr;
   }
 
-  if (UNIV_LIKELY(block != nullptr))
+  if (UNIV_UNLIKELY(mysql_mutex_trylock(&buf_pool.mutex)))
   {
-    bpage= &block->page;
-    /* Insert into the hash table of file pages */
+    hash_lock.unlock();
+    mysql_mutex_lock(&buf_pool.mutex);
+    hash_lock.lock();
+    if (buf_pool.page_hash.get(page_id, chain))
     {
-      transactional_lock_guard<page_hash_latch> g
-        {buf_pool.page_hash.lock_get(chain)};
-      buf_pool.page_hash.append(chain, &block->page);
+      mysql_mutex_unlock(&buf_pool.mutex);
+      goto page_exists;
     }
+  }
+
+  zip_size&= ~1;
+
+  if (UNIV_LIKELY(bpage != nullptr))
+  {
+    block= nullptr;
+    /* Insert into the hash table of file pages */
+    buf_pool.page_hash.append(chain, bpage);
+    hash_lock.unlock();
 
     /* The block must be put to the LRU list, to the old blocks */
-    buf_LRU_add_block(&block->page, true/* to old blocks */);
+    buf_LRU_add_block(bpage, true/* to old blocks */);
     if (UNIV_UNLIKELY(zip_size))
     {
@@ -110,19 +133,19 @@ static buf_page_t* buf_page_init_for_read(const page_id_t page_id,
       buf_buddy_alloc(). We must defer this operation until after the
       block descriptor has been added to buf_pool.LRU and
       buf_pool.page_hash. */
-      block->page.zip.data= static_cast<page_zip_t*>
-        (buf_buddy_alloc(zip_size));
+      bpage->zip.data= static_cast<page_zip_t*>(buf_buddy_alloc(zip_size));
 
       /* To maintain the invariant
       block->in_unzip_LRU_list == block->page.belongs_to_unzip_LRU()
       we have to add this block to unzip_LRU
       after block->page.zip.data is set. */
-      ut_ad(block->page.belongs_to_unzip_LRU());
-      buf_unzip_LRU_add_block(block, TRUE);
+      ut_ad(bpage->belongs_to_unzip_LRU());
+      buf_unzip_LRU_add_block(reinterpret_cast<buf_block_t*>(bpage), TRUE);
     }
   }
   else
   {
+    hash_lock.unlock();
     /* The compressed page must be allocated before the
     control block (bpage), in order to avoid the
     invocation of buf_buddy_relocate_block() on
@@ -178,44 +201,39 @@ buffer buf_pool if it is not already there, in which case does nothing.
 Sets the io_fix flag and sets an exclusive lock on the buffer frame. The
 flag is cleared and the x-lock released by an i/o-handler thread.
+@param[in]	page_id		page id
+@param[in]	zip_size	ROW_FORMAT=COMPRESSED page size, or 0,
+				bitwise-ORed with 1 in recovery
+@param[in,out]	chain		buf_pool.page_hash cell for page_id
 @param[out]	err		DB_SUCCESS or DB_TABLESPACE_DELETED
 				if we are trying
 				to read from a non-existent tablespace
 @param[in,out]	space		tablespace
+@param[in,out]	block		preallocated buffer block
 @param[in]	sync		true if synchronous aio is desired
-@param[in]	page_id		page id
-@param[in]	zip_size	ROW_FORMAT=COMPRESSED page size, or 0
-@return whether a read request was queued */
+@return whether a read request was enqueued */
 static
 bool
 buf_read_page_low(
+	const page_id_t		page_id,
+	ulint			zip_size,
+	buf_pool_t::hash_chain&	chain,
 	dberr_t*		err,
 	fil_space_t*		space,
-	bool			sync,
-	const page_id_t		page_id,
-	ulint			zip_size)
+	buf_block_t*&		block,
+	bool			sync = false)
 {
 	buf_page_t*	bpage;
 
 	*err = DB_SUCCESS;
 
-	if (buf_dblwr.is_inside(page_id)) {
-		ib::error() << "Trying to read doublewrite buffer page "
-			<< page_id;
-		ut_ad(0);
-nothing_read:
-		space->release();
-		return false;
-	}
+	ut_ad(!buf_dblwr.is_inside(page_id));
 
-	/* The following call will also check if the tablespace does not exist
-	or is being dropped; if we succeed in initing the page in the buffer
-	pool for read, then DISCARD cannot proceed until the read has
-	completed */
-	bpage = buf_page_init_for_read(page_id, zip_size);
+	bpage = buf_page_init_for_read(page_id, zip_size, chain, block);
 
-	if (bpage == NULL) {
-		goto nothing_read;
+	if (!bpage) {
+		space->release();
+		return false;
 	}
 
 	ut_ad(bpage->in_file());
@@ -228,8 +246,8 @@ buf_read_page_low(
 		       "read page " << page_id << " zip_size=" << zip_size
 		       << (sync ? " sync" : " async"));
 
-	void* dst = zip_size ? bpage->zip.data : bpage->frame;
-	const ulint len = zip_size ? zip_size : srv_page_size;
+	void* dst = zip_size > 1 ? bpage->zip.data : bpage->frame;
+	const ulint len = zip_size & ~1 ? zip_size & ~1 : srv_page_size;
 
 	auto fio = space->io(IORequest(sync
 				       ? IORequest::READ_SYNC
@@ -251,6 +269,23 @@ buf_read_page_low(
 	return true;
 }
 
+/** Acquire a buffer block. */
+static buf_block_t *buf_read_acquire()
+{
+  return buf_LRU_get_free_block(false);
+}
+
+/** Free a buffer block if needed. */
+static void buf_read_release(buf_block_t *block)
+{
+  if (block)
+  {
+    mysql_mutex_lock(&buf_pool.mutex);
+    buf_LRU_block_free_non_file_page(block);
+    mysql_mutex_unlock(&buf_pool.mutex);
+  }
+}
+
 /** Applies a random read-ahead in buf_pool if there are at least a threshold
 value of accessed pages from the random read-ahead area. Does not read any
 page, not even the one at the position (space, offset), if the read-ahead
@@ -306,15 +341,22 @@ ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size)
     goto no_read_ahead;
 
   /* Read all the suitable blocks within the area */
+  buf_block_t *block= zip_size ? nullptr : buf_read_acquire();
 
   for (page_id_t i= low; i < high; ++i)
   {
     if (space->is_stopping())
       break;
+    buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(i.fold());
     dberr_t err;
     space->reacquire();
-    if (buf_read_page_low(&err, space, false, i, zip_size))
+    if (buf_read_page_low(i, zip_size, chain, &err, space, block))
+    {
       count++;
+      ut_ad(!block);
+      if (!zip_size)
+        block= buf_read_acquire();
+    }
   }
 
   if (count)
@@ -326,6 +368,7 @@ ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size)
   /* Read ahead is considered one I/O operation for the purpose of
   LRU policy decision. */
   buf_LRU_stat_inc_io();
+  buf_read_release(block);
 
   buf_pool.stat.n_ra_pages_read_rnd+= count;
   srv_stats.buf_pool_reads.add(count);
@@ -336,14 +379,16 @@ ulint buf_read_ahead_random(const page_id_t page_id, ulint zip_size)
 if it is not already there. Sets the io_fix and an exclusive lock
 on the buffer frame. The flag is cleared and the x-lock
 released by the i/o-handler thread.
-@param[in]	page_id		page id
-@param[in]	zip_size	ROW_FORMAT=COMPRESSED page size, or 0
+@param page_id   page id
+@param zip_size  ROW_FORMAT=COMPRESSED page size, or 0
+@param chain     buf_pool.page_hash cell for page_id
 @retval DB_SUCCESS if the page was read and is not corrupted,
 @retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted,
 @retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
 after decryption normal page checksum does not match.
 @retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
-dberr_t buf_read_page(const page_id_t page_id, ulint zip_size)
+dberr_t buf_read_page(const page_id_t page_id, ulint zip_size,
+                      buf_pool_t::hash_chain &chain)
 {
   fil_space_t *space= fil_space_t::get(page_id.space());
   if (!space)
@@ -353,9 +398,18 @@ dberr_t buf_read_page(const page_id_t page_id, ulint zip_size)
     return DB_TABLESPACE_DELETED;
   }
 
+  buf_block_t *block= zip_size ? nullptr : buf_LRU_get_free_block(false);
+
+  /* Our caller should already have ensured that the page does not
+  exist in buf_pool.page_hash. */
   dberr_t err;
-  if (buf_read_page_low(&err, space, true, page_id, zip_size))
+  if (buf_read_page_low(page_id, zip_size, chain, &err, space, block, true))
+  {
+    ut_ad(!block);
     srv_stats.buf_pool_reads.add(1);
+  }
+  else
+    buf_read_release(block);
 
   buf_LRU_stat_inc_io();
   return err;
@@ -371,18 +425,30 @@ released by the i/o-handler thread.
 void buf_read_page_background(fil_space_t *space, const page_id_t page_id,
                               ulint zip_size)
 {
-  dberr_t err;
+  buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold());
+
+  if (buf_pool.page_hash_contains(page_id, chain))
+  {
+    space->release();
+    return;
+  }
+
+  buf_block_t *block= zip_size ? nullptr : buf_read_acquire();
 
-  if (buf_read_page_low(&err, space, false, page_id, zip_size)) {
+  dberr_t err;
+  if (buf_read_page_low(page_id, zip_size, chain, &err, space, block))
+  {
+    ut_ad(!block);
     srv_stats.buf_pool_reads.add(1);
   }
+  else
+    buf_read_release(block);
 
   /* We do not increment number of I/O operations used for LRU policy
   here (buf_LRU_stat_inc_io()). We use this in heuristics to decide
-  about evicting uncompressed version of compressed pages from the
-  buffer pool. Since this function is called from buffer pool load
-  these IOs are deliberate and are not part of normal workload we can
-  ignore these in our heuristics. */
+  about evicting uncompressed version of ROW_FORMAT=COMPRESSED pages
+  from the buffer pool. Since this function is called from buffer pool
+  load these IOs are deliberate and are not part of normal workload we
+  can ignore these in our heuristics. */
 }
 
 /** Applies linear read-ahead if in the buf_pool the page is a border page of
@@ -515,14 +581,23 @@ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size)
   }
 
   /* If we got this far, read-ahead can be sensible: do it */
+  buf_block_t *block= zip_size ? nullptr : buf_read_acquire();
   count= 0;
   for (; new_low != new_high_1; ++new_low)
   {
     if (space->is_stopping())
       break;
+    buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(new_low.fold());
     dberr_t err;
     space->reacquire();
-    count+= buf_read_page_low(&err, space, false, new_low, zip_size);
+    if (buf_read_page_low(new_low, zip_size, chain, &err, space, block))
+    {
+      count++;
+      ut_ad(!block);
+      if (!zip_size)
+        block= buf_read_acquire();
+    }
   }
 
   if (count)
@@ -534,6 +609,7 @@ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size)
   /* Read ahead is considered one I/O operation for the purpose of
   LRU policy decision. */
   buf_LRU_stat_inc_io();
+  buf_read_release(block);
 
   buf_pool.stat.n_ra_pages_read+= count;
   return count;
@@ -558,7 +634,8 @@ void buf_read_recv_pages(uint32_t space_id, st_::span<uint32_t> page_nos)
 		return;
 	}
 
-	const ulint zip_size = space->zip_size();
+	const ulint zip_size = space->zip_size() | 1;
+	buf_block_t* block = buf_LRU_get_free_block(false);
 
 	for (ulint i = 0; i < page_nos.size(); i++) {
@@ -588,9 +665,15 @@ void buf_read_recv_pages(uint32_t space_id, st_::span<uint32_t> page_nos)
 			}
 		}
 
+		buf_pool_t::hash_chain& chain =
+			buf_pool.page_hash.cell_get(cur_page_id.fold());
 		dberr_t err;
 		space->reacquire();
-		buf_read_page_low(&err, space, false, cur_page_id, zip_size);
+		if (buf_read_page_low(cur_page_id, zip_size, chain, &err, space,
+				      block)) {
+			ut_ad(!block);
+			block = buf_LRU_get_free_block(false);
+		}
 
 		if (err != DB_SUCCESS) {
 			sql_print_error("InnoDB: Recovery failed to read page "
@@ -600,8 +683,9 @@ void buf_read_recv_pages(uint32_t space_id, st_::span<uint32_t> page_nos)
 		}
 	}
 
 	DBUG_PRINT("ib_buf", ("recovery read (%zu pages) for %s",
 			      page_nos.size(), space->chain.start->name));
 	space->release();
+	buf_read_release(block);
 }
@@ -1385,6 +1385,12 @@ class buf_pool_t
   }
 
 public:
+  /** @return whether the buffer pool contains a page
+  @param page_id   page identifier
+  @param chain     hash table chain for page_id.fold() */
+  TRANSACTIONAL_TARGET
+  bool page_hash_contains(const page_id_t page_id, hash_chain &chain);
+
   /** @return whether less than 1/4 of the buffer pool is available */
   TPOOL_SUPPRESS_TSAN
   bool running_out() const
...
@@ -32,14 +32,16 @@ Created 11/5/1995 Heikki Tuuri
 buffer buf_pool if it is not already there. Sets the io_fix flag and sets
 an exclusive lock on the buffer frame. The flag is cleared and the x-lock
 released by the i/o-handler thread.
-@param[in]	page_id		page id
-@param[in]	zip_size	ROW_FORMAT=COMPRESSED page size, or 0
+@param page_id   page id
+@param zip_size  ROW_FORMAT=COMPRESSED page size, or 0
+@param chain     buf_pool.page_hash cell for page_id
 @retval DB_SUCCESS if the page was read and is not corrupted,
 @retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted,
 @retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
 after decryption normal page checksum does not match.
 @retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
-dberr_t buf_read_page(const page_id_t page_id, ulint zip_size);
+dberr_t buf_read_page(const page_id_t page_id, ulint zip_size,
+                      buf_pool_t::hash_chain &chain);
 
 /** High-level function which reads a page asynchronously from a file to the
 buffer buf_pool if it is not already there. Sets the io_fix flag and sets
...