MDEV-23693 Failing assertion: my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) == X_LOCK_DECR

InnoDB frees the block lock during buffer pool shrinking while another
thread has yet to release that lock.  While shrinking the buffer pool,
InnoDB allows a page to be freed as long as it is not buffer-fixed.
In some code paths, however, InnoDB unfixes the block before releasing
the latch, leaving a window in which a still-latched block can be freed.

Fix:
====
- InnoDB should unfix the block only after releasing the latch
(see the short illustration after this list).

- Add more assertions to verify that the block is buffer-fixed
while the page is being accessed.

- Introduce a block_hint structure that stores the buf_block_t pointer
and allows it to be accessed only through a functor. The functor
receives the original buf_block_t* if it is still valid, or nullptr
if the pointer has become stale (a usage sketch follows the
motivation note below).

- Replace buf_block_is_uncompressed() with
buf_pool_t::is_block_field()
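
For the first point, the ordering change can be seen in the mtr0mtr
hunks below: the latch is now released while the block is still
buffer-fixed, and the block is unfixed afterwards:

    /* old order: the block is unfixed first, so buffer pool
    resizing may free it while the latch is still held */
    buf_block_unfix(block);
    buf_page_release_latch(block, slot->type);

    /* new order: the buffer fix keeps the block alive until the
    latch has been released */
    buf_page_release_latch(block, slot->type);
    buf_block_unfix(block);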

This change is motivated by a change in mysql-5.7.32:
mysql/mysql-server@46e60de444a8fbd876cc6778a7e64a1d3426a48d
Bug #31036301 ASSERTION FAILURE: SYNC0RW.IC:429:LOCK->LOCK_WORD
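
A condensed sketch of how buf::Block_hint is used (simplified from the
btr0pcur.cc and gis0sea.cc hunks below; the full functor definitions
are part of this patch, and the snippet is illustrative rather than
literal code):

    buf::Block_hint hint;

    /* while saving the cursor position: block is buffer-fixed here */
    hint.store(block);

    /* while restoring the position: run_with_hint() re-checks under the
    page_hash latch that the stored pointer still refers to a valid file
    page with the same page id, buffer-fixes it for the duration of the
    call, and passes it to the functor; a stale pointer is passed as
    nullptr */
    bool ok = hint.run_with_hint(optimistic_get(r_cursor, mtr));
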
parent 6a614d69
@@ -28,6 +28,7 @@ SET(INNOBASE_SOURCES
        btr/btr0scrub.cc
        btr/btr0sea.cc
        btr/btr0defragment.cc
+       buf/buf0block_hint.cc
        buf/buf0buddy.cc
        buf/buf0buf.cc
        buf/buf0dblwr.cc
......
@@ -695,6 +695,8 @@ PageBulk::latch()
                m_mtr.set_named_space(m_index->space);
        }

+       ut_ad(m_block->page.buf_fix_count);
+
        /* In case the block is S-latched by page_cleaner. */
        if (!buf_page_optimistic_get(RW_X_LATCH, m_block, m_modify_clock,
                                     __FILE__, __LINE__, &m_mtr)) {
@@ -713,6 +715,8 @@ PageBulk::latch()
        buf_block_buf_fix_dec(m_block);

+       ut_ad(m_block->page.buf_fix_count);
+
        ut_ad(m_cur_rec > m_page && m_cur_rec < m_heap_top);

        return (m_err);
......
@@ -416,6 +416,8 @@ btr_cur_optimistic_latch_leaves(
        ulint           mode;
        ulint           left_page_no;
        ulint           curr_page_no;
+       ut_ad(block->page.buf_fix_count);
+       ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

        switch (*latch_mode) {
        case BTR_SEARCH_LEAF:
@@ -427,20 +429,10 @@ btr_cur_optimistic_latch_leaves(
                mode = *latch_mode == BTR_SEARCH_PREV
                        ? RW_S_LATCH : RW_X_LATCH;

-               buf_page_mutex_enter(block);
-               if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
-                       buf_page_mutex_exit(block);
-                       return(false);
-               }
-               /* pin the block not to be relocated */
-               buf_block_buf_fix_inc(block, file, line);
-               buf_page_mutex_exit(block);
-
                rw_lock_s_lock(&block->lock);
                if (block->modify_clock != modify_clock) {
                        rw_lock_s_unlock(&block->lock);
-
-                       goto unpin_failed;
+                       return false;
                }
                curr_page_no = block->page.id.page_no();
@@ -470,7 +462,7 @@ btr_cur_optimistic_latch_leaves(
                                /* release the left block */
                                btr_leaf_page_release(
                                        cursor->left_block, mode, mtr);
-                               goto unpin_failed;
+                               return false;
                        }
                } else {
                        cursor->left_block = NULL;
@@ -480,23 +472,28 @@ btr_cur_optimistic_latch_leaves(
                                            file, line, mtr)) {
                        if (btr_page_get_prev(buf_block_get_frame(block))
                            == left_page_no) {
-                               buf_block_buf_fix_dec(block);
+                               /* block was already buffer-fixed while
+                               entering the function and
+                               buf_page_optimistic_get() buffer-fixes
+                               it again. */
+                               ut_ad(2 <= block->page.buf_fix_count);
                                *latch_mode = mode;
                                return(true);
                        } else {
-                               /* release the block */
+                               /* release the block and decrement of
+                               buf_fix_count which was incremented
+                               in buf_page_optimistic_get() */
                                btr_leaf_page_release(block, mode, mtr);
                        }
                }

+               ut_ad(block->page.buf_fix_count);
                /* release the left block */
                if (cursor->left_block != NULL) {
                        btr_leaf_page_release(cursor->left_block,
                                              mode, mtr);
                }
-unpin_failed:
-               /* unpin the block */
-               buf_block_buf_fix_dec(block);
                return(false);

        default:
@@ -1066,12 +1063,7 @@ btr_cur_search_to_nth_level_func(
        guess = NULL;
 #else
        info = btr_search_get_info(index);
-
-       if (!buf_pool_is_obsolete(info->withdraw_clock)) {
-               guess = info->root_guess;
-       } else {
-               guess = NULL;
-       }
+       guess = info->root_guess;

 #ifdef BTR_CUR_HASH_ADAPT
        rw_lock_t* const        search_latch = btr_get_search_latch(index);
@@ -1509,10 +1501,7 @@ btr_cur_search_to_nth_level_func(
                }

 #ifdef BTR_CUR_ADAPT
-               if (block != guess) {
-                       info->root_guess = block;
-                       info->withdraw_clock = buf_withdraw_clock;
-               }
+               info->root_guess = block;
 #endif
        }
......
@@ -165,11 +165,10 @@ btr_pcur_store_position(
                index, rec, &cursor->old_n_fields,
                &cursor->old_rec_buf, &cursor->buf_size);

-       cursor->block_when_stored = block;
+       cursor->block_when_stored.store(block);

        /* Function try to check if block is S/X latch. */
        cursor->modify_clock = buf_block_get_modify_clock(block);
-       cursor->withdraw_clock = buf_withdraw_clock;
 }
/**************************************************************//** /**************************************************************//**
@@ -199,6 +198,26 @@ btr_pcur_copy_stored_position(
        pcur_receive->old_n_fields = pcur_donate->old_n_fields;
 }

+/** Structure acts as functor to do the latching of leaf pages.
+It returns true if latching of leaf pages succeeded and false
+otherwise. */
+struct optimistic_latch_leaves
+{
+  btr_pcur_t *const cursor;
+  ulint *latch_mode;
+  mtr_t *const mtr;
+
+  optimistic_latch_leaves(btr_pcur_t *cursor, ulint *latch_mode, mtr_t *mtr)
+  :cursor(cursor), latch_mode(latch_mode), mtr(mtr) {}
+
+  bool operator() (buf_block_t *hint) const
+  {
+    return hint && btr_cur_optimistic_latch_leaves(
+             hint, cursor->modify_clock, latch_mode,
+             btr_pcur_get_btr_cur(cursor), __FILE__, __LINE__, mtr);
+  }
+};
+
 /**************************************************************//**
 Restores the stored position of a persistent cursor bufferfixing the page and
 obtaining the specified latches. If the cursor position was saved when the
@@ -261,7 +280,7 @@ btr_pcur_restore_position_func(
                cursor->latch_mode =
                        BTR_LATCH_MODE_WITHOUT_INTENTION(latch_mode);
                cursor->pos_state = BTR_PCUR_IS_POSITIONED;
-               cursor->block_when_stored = btr_pcur_get_block(cursor);
+               cursor->block_when_stored.clear();

                return(FALSE);
        }
@@ -276,12 +295,9 @@ btr_pcur_restore_position_func(
        case BTR_MODIFY_PREV:
                /* Try optimistic restoration. */

-               if (!buf_pool_is_obsolete(cursor->withdraw_clock)
-                   && btr_cur_optimistic_latch_leaves(
-                           cursor->block_when_stored, cursor->modify_clock,
-                           &latch_mode, btr_pcur_get_btr_cur(cursor),
-                           file, line, mtr)) {
+               if (cursor->block_when_stored.run_with_hint(
+                           optimistic_latch_leaves(cursor, &latch_mode,
+                                                   mtr))) {
                        cursor->pos_state = BTR_PCUR_IS_POSITIONED;
                        cursor->latch_mode = latch_mode;
@@ -378,11 +394,10 @@ btr_pcur_restore_position_func(
        since the cursor can now be on a different page!
        But we can retain the value of old_rec */

-       cursor->block_when_stored = btr_pcur_get_block(cursor);
+       cursor->block_when_stored.store(btr_pcur_get_block(cursor));
        cursor->modify_clock = buf_block_get_modify_clock(
-               cursor->block_when_stored);
+               cursor->block_when_stored.block());
        cursor->old_stored = true;
-       cursor->withdraw_clock = buf_withdraw_clock;

        mem_heap_free(heap);
......
/*****************************************************************************
Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.
This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*****************************************************************************/
#include "buf0block_hint.h"
namespace buf {
void Block_hint::buffer_fix_block_if_still_valid()
{
/* We need to check if m_block points to one of chunks. For this to be
meaningful we need to prevent freeing memory while we check, and until we
buffer-fix the block. For this purpose it is enough to latch any of the many
latches taken by buf_resize().
However, for buffer-fixing to be meaningful, the block has to contain a page
(as opposed to being already empty, which might mean that buf_pool_resize()
can proceed and free it once we free the s-latch), so we confirm that the
block contains a page. However, it is not sufficient to check that this is
just any page, because just after we check it could get freed, unless we
have a latch which prevents this. This is tricky because page_hash latches
are sharded by page_id and we don't know the page_id until we look into the
block. To solve this chicken-and-egg problem somewhat, we latch the shard
for the m_page_id and compare block->page.id to it - so if it is equal then we
can be reasonably sure that we have the correct latch.
There is still a theoretical problem here, where other threads might try
to modify the m_block->page.id while we are comparing it, but the chance of
accidentally causing the old space_id == m_page_id.m_space and the new
page_no == m_page_id.m_page_no is minimal as compilers emit a single 8-byte
comparison instruction to compare both at the same time atomically, and f()
will probably double-check the block->page.id again, anyway.
Finally, assuming that we have correct hash bucket latched, we should check if
the state of the block is BUF_BLOCK_FILE_PAGE before buffer-fixing the block,
as otherwise we risk buffer-fixing and operating on a block, which is already
meant to be freed. In particular, buf_LRU_free_page() first calls
buf_LRU_block_remove_hashed() under hash bucket latch protection to change the
state to BUF_BLOCK_REMOVE_HASH and then releases the latch. Later it calls
buf_LRU_block_free_hashed_page() without any latch to change the state to
BUF_BLOCK_MEMORY and reset the page's id, which means buf_resize() can free it
regardless of our buffer-fixing. */
if (m_block)
{
const buf_pool_t *const buf_pool= buf_pool_get(m_page_id);
rw_lock_t *latch= buf_page_hash_lock_get(buf_pool, m_page_id);
rw_lock_s_lock(latch);
/* If not own buf_pool_mutex, page_hash can be changed. */
latch= buf_page_hash_lock_s_confirm(latch, buf_pool, m_page_id);
if (buf_pool->is_block_field(m_block) &&
m_page_id == m_block->page.id &&
buf_block_get_state(m_block) == BUF_BLOCK_FILE_PAGE)
buf_block_buf_fix_inc(m_block, __FILE__, __LINE__);
else
clear();
rw_lock_s_unlock(latch);
}
}
} // namespace buf
@@ -339,14 +339,6 @@ buf_pool_t* buf_pool_ptr;
 /** true when resizing buffer pool is in the critical path. */
 volatile bool   buf_pool_resizing;

-/** true when withdrawing buffer pool pages might cause page relocation */
-volatile bool   buf_pool_withdrawing;
-
-/** the clock is incremented every time a pointer to a page may become obsolete;
-if the withdrwa clock has not changed, the pointer is still valid in buffer
-pool. if changed, the pointer might not be in buffer pool any more. */
-volatile ulint  buf_withdraw_clock;
-
 /** Map of buffer pool chunks by its first frame address
 This is newly made by initialization of buffer pool and buf_resize_thread.
 Currently, no need mutex protection for update. */
@@ -2068,8 +2060,6 @@ buf_pool_init(
        NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;

        buf_pool_resizing = false;
-       buf_pool_withdrawing = false;
-       buf_withdraw_clock = 0;

        buf_pool_ptr = (buf_pool_t*) ut_zalloc_nokey(
                n_instances * sizeof *buf_pool_ptr);
@@ -2129,7 +2119,6 @@ buf_page_realloc(
 {
        buf_block_t*    new_block;

-       ut_ad(buf_pool_withdrawing);
        ut_ad(buf_pool_mutex_own(buf_pool));
        ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@@ -2551,9 +2540,6 @@ buf_pool_withdraw_blocks(
        ib::info() << "buffer pool " << i << " : withdrawn target "
                << UT_LIST_GET_LEN(buf_pool->withdraw) << " blocks.";

-       /* retry is not needed */
-       ++buf_withdraw_clock;
-
        return(false);
 }
@@ -2650,7 +2636,6 @@ buf_pool_resize()
        NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;

        ut_ad(!buf_pool_resizing);
-       ut_ad(!buf_pool_withdrawing);
        ut_ad(srv_buf_pool_chunk_unit > 0);

        new_instance_size = srv_buf_pool_size / srv_buf_pool_instances;
@@ -2717,7 +2702,6 @@ buf_pool_resize()

                        ut_ad(buf_pool->withdraw_target == 0);
                        buf_pool->withdraw_target = withdraw_target;
-                       buf_pool_withdrawing = true;
                }
        }
@@ -2742,7 +2726,6 @@ buf_pool_resize()

                if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
                        /* abort to resize for shutdown. */
-                       buf_pool_withdrawing = false;
                        return;
                }
@@ -2804,7 +2787,6 @@ buf_pool_resize()
                goto withdraw_retry;
        }

-       buf_pool_withdrawing = false;
        buf_resize_status("Latching whole of buffer pool.");
@@ -3981,37 +3963,6 @@ buf_block_from_ahi(const byte* ptr)
 }
 #endif /* BTR_CUR_HASH_ADAPT */

-/********************************************************************//**
-Find out if a pointer belongs to a buf_block_t. It can be a pointer to
-the buf_block_t itself or a member of it. This functions checks one of
-the buffer pool instances.
-@return TRUE if ptr belongs to a buf_block_t struct */
-static
-ibool
-buf_pointer_is_block_field_instance(
-/*================================*/
-       buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
-       const void*     ptr)            /*!< in: pointer not dereferenced */
-{
-       const buf_chunk_t*              chunk = buf_pool->chunks;
-       const buf_chunk_t* const        echunk = chunk + ut_min(
-               buf_pool->n_chunks, buf_pool->n_chunks_new);
-
-       /* TODO: protect buf_pool->chunks with a mutex (the older pointer will
-       currently remain while during buf_pool_resize()) */
-       while (chunk < echunk) {
-               if (ptr >= (void*) chunk->blocks
-                   && ptr < (void*) (chunk->blocks + chunk->size)) {
-
-                       return(TRUE);
-               }
-
-               chunk++;
-       }
-
-       return(FALSE);
-}
-
 /********************************************************************//**
 Find out if a pointer belongs to a buf_block_t. It can be a pointer to
 the buf_block_t itself or a member of it
@@ -4024,11 +3975,7 @@ buf_pointer_is_block_field(
        ulint   i;

        for (i = 0; i < srv_buf_pool_instances; i++) {
-               ibool   found;
-
-               found = buf_pointer_is_block_field_instance(
-                       buf_pool_from_array(i), ptr);
-               if (found) {
+               if (buf_pool_from_array(i)->is_block_field(ptr)) {
                        return(TRUE);
                }
        }
@@ -4036,25 +3983,6 @@ buf_pointer_is_block_field(
        return(FALSE);
 }

-/********************************************************************//**
-Find out if a buffer block was created by buf_chunk_init().
-@return TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */
-static
-ibool
-buf_block_is_uncompressed(
-/*======================*/
-       buf_pool_t*             buf_pool,       /*!< in: buffer pool instance */
-       const buf_block_t*      block)          /*!< in: pointer to block,
-                                               not dereferenced */
-{
-       if ((((ulint) block) % sizeof *block) != 0) {
-               /* The pointer should be aligned. */
-               return(FALSE);
-       }
-
-       return(buf_pointer_is_block_field_instance(buf_pool, (void*) block));
-}
-
 #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
 /********************************************************************//**
 Return true if probe is enabled.
@@ -4293,7 +4221,7 @@ buf_page_get_low(
                has been allocated by buf_page_alloc_descriptor(),
                it may have been freed by buf_relocate(). */

-               if (!buf_block_is_uncompressed(buf_pool, block)
+               if (!buf_pool->is_block_field(block)
                    || page_id != block->page.id
                    || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
......
@@ -1257,6 +1257,24 @@ rtr_check_discard_page(
        lock_mutex_exit();
 }

+/** Structure acts as functor to get the optimistic access of the page.
+It returns true if it successfully gets the page. */
+struct optimistic_get
+{
+  btr_pcur_t *const r_cursor;
+  mtr_t *const mtr;
+
+  optimistic_get(btr_pcur_t *r_cursor,mtr_t *mtr)
+  :r_cursor(r_cursor), mtr(mtr) {}
+
+  bool operator()(buf_block_t *hint) const
+  {
+    return hint && buf_page_optimistic_get(
+        RW_X_LATCH, hint, r_cursor->modify_clock, __FILE__,
+        __LINE__, mtr);
+  }
+};
+
 /** Restore the stored position of a persistent cursor bufferfixing the page */
 static
 bool
@@ -1290,11 +1308,8 @@ rtr_cur_restore_position(
        ut_ad(latch_mode == BTR_CONT_MODIFY_TREE);

-       if (!buf_pool_is_obsolete(r_cursor->withdraw_clock)
-           && buf_page_optimistic_get(RW_X_LATCH,
-                                      r_cursor->block_when_stored,
-                                      r_cursor->modify_clock,
-                                      __FILE__, __LINE__, mtr)) {
+       if (r_cursor->block_when_stored.run_with_hint(
+               optimistic_get(r_cursor, mtr))) {
                ut_ad(r_cursor->pos_state == BTR_PCUR_IS_POSITIONED);
                ut_ad(r_cursor->rel_pos == BTR_PCUR_ON);
......
@@ -29,6 +29,7 @@ Created 2/23/1996 Heikki Tuuri
 #include "dict0dict.h"
 #include "btr0cur.h"
+#include "buf0block_hint.h"
 #include "btr0btr.h"
 #include "gis0rtree.h"
@@ -514,13 +515,10 @@ struct btr_pcur_t{
        whether cursor was on, before, or after the old_rec record */
        enum btr_pcur_pos_t     rel_pos;
        /** buffer block when the position was stored */
-       buf_block_t*            block_when_stored;
+       buf::Block_hint         block_when_stored;
        /** the modify clock value of the buffer block when the cursor position
        was stored */
        ib_uint64_t             modify_clock;
-       /** the withdraw clock value of the buffer pool when the cursor
-       position was stored */
-       ulint                   withdraw_clock;
        /** btr_pcur_store_position() and btr_pcur_restore_position() state. */
        enum pcur_pos_t         pos_state;
        /** PAGE_CUR_G, ... */
@@ -540,9 +538,8 @@ struct btr_pcur_t{
        btr_pcur_t() :
                btr_cur(), latch_mode(0), old_stored(false), old_rec(NULL),
                old_n_fields(0), rel_pos(btr_pcur_pos_t(0)),
-               block_when_stored(NULL),
-               modify_clock(0), withdraw_clock(0),
-               pos_state(BTR_PCUR_NOT_POSITIONED),
+               block_when_stored(),
+               modify_clock(0), pos_state(BTR_PCUR_NOT_POSITIONED),
                search_mode(PAGE_CUR_UNSUPP), trx_if_known(NULL),
                old_rec_buf(NULL), buf_size(0)
        {
......
/*****************************************************************************
Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.
This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*****************************************************************************/
#pragma once
#include "buf0buf.h"
namespace buf {
class Block_hint {
public:
Block_hint():m_block(NULL),m_page_id(0,0) {}
/** Stores the pointer to the block, which is currently buffer-fixed.
@param block a pointer to a buffer-fixed block to be stored */
inline void store(buf_block_t *block)
{
ut_ad(block->page.buf_fix_count);
m_block= block;
m_page_id= block->page.id;
}
/** Clears currently stored pointer. */
inline void clear() { m_block= NULL; }
/** Invoke f on m_block(which may be null)
@param f The function to be executed. It will be passed the pointer.
If you wish to use the block pointer subsequently,
you need to ensure you buffer-fix it before returning from f.
@return the return value of f
*/
template <typename F>
bool run_with_hint(const F &f)
{
buffer_fix_block_if_still_valid();
/* m_block could be changed during f() call, so we use local
variable to remember which block we need to unfix */
buf_block_t *block= m_block;
bool res= f(block);
if (block)
buf_block_buf_fix_dec(block);
return res;
}
buf_block_t *block() const { return m_block; }
private:
/** The block pointer stored by store(). */
buf_block_t *m_block;
/** If m_block is non-null, the m_block->page.id at time it was stored. */
page_id_t m_page_id;
/** A helper function which checks if m_block is not a dangling pointer and
still points to block with page with m_page_id and if so, buffer-fixes it,
otherwise clear()s it */
void buffer_fix_block_if_still_valid();
};
} // namespace buf
@@ -101,10 +101,6 @@ extern buf_pool_t* buf_pool_ptr; /*!< The buffer pools
 extern volatile bool    buf_pool_withdrawing; /*!< true when withdrawing buffer
                                        pool pages might cause page relocation */

-extern volatile ulint   buf_withdraw_clock; /*!< the clock is incremented
-                                       every time a pointer to a page may
-                                       become obsolete */
-
 # ifdef UNIV_DEBUG
 extern my_bool  buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing
 buffer pool is not allowed. */
@@ -1373,14 +1369,6 @@ buf_get_nth_chunk_block(
        ulint           n,              /*!< in: nth chunk in the buffer pool */
        ulint*          chunk_size);    /*!< in: chunk size */

-/** Verify the possibility that a stored page is not in buffer pool.
-@param[in]     withdraw_clock  withdraw clock when stored the page
-@retval true   if the page might be relocated */
-UNIV_INLINE
-bool
-buf_pool_is_obsolete(
-       ulint   withdraw_clock);
-
 /** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
 if needed.
 @param[in]     size    size in bytes
@@ -2207,6 +2195,11 @@ struct buf_pool_t{
        buf_tmp_array_t*        tmp_arr;
                                /*!< Array for temporal memory
                                used in compression and encryption */
+       /** Determine if a pointer belongs to a buf_block_t.
+       It can be a pointer to the buf_block_t itself or a member of it.
+       @param ptr      a pointer that will not be dereferenced
+       @return whether the ptr belongs to a buf_block_t struct */
+       inline bool is_block_field(const void *ptr) const;
 #if BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN
 # error "BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN"
......
@@ -54,6 +54,25 @@ struct buf_chunk_t{
        }
 };

+bool buf_pool_t::is_block_field(const void *ptr) const
+{
+  const buf_chunk_t* chunk= chunks;
+  const buf_chunk_t *const echunk= chunk + ut_min(n_chunks,
+                                                  n_chunks_new);
+
+  /* TODO: protect chunks with a mutex (the older pointer will
+  currently remain during resize()) */
+  while (chunk < echunk)
+  {
+    if (ptr >= reinterpret_cast<const void*>(chunk->blocks) &&
+        ptr < reinterpret_cast<const void*>(
+          chunk->blocks + chunk->size))
+      return true;
+    chunk++;
+  }
+
+  return false;
+}
+
 /*********************************************************************//**
 Gets the current size of buffer buf_pool in bytes.
 @return size in bytes */
@@ -1056,8 +1075,6 @@ buf_block_buf_fix_dec(
 /*==================*/
        buf_block_t*    block)  /*!< in/out: block to bufferunfix */
 {
-       buf_block_unfix(block);
-
 #ifdef UNIV_DEBUG
        /* No debug latch is acquired if block belongs to system temporary.
        Debug latch is not of much help if access to block is single
@@ -1066,6 +1083,8 @@ buf_block_buf_fix_dec(
                rw_lock_s_unlock(&block->debug_latch);
        }
 #endif /* UNIV_DEBUG */
+
+       buf_block_unfix(block);
 }
/** Returns the buffer pool instance given a page id. /** Returns the buffer pool instance given a page id.
@@ -1439,18 +1458,6 @@ buf_page_get_frame(
        }
 }

-/** Verify the possibility that a stored page is not in buffer pool.
-@param[in]     withdraw_clock  withdraw clock when stored the page
-@retval true   if the page might be relocated */
-UNIV_INLINE
-bool
-buf_pool_is_obsolete(
-       ulint   withdraw_clock)
-{
-       return(buf_pool_withdrawing
-              || buf_withdraw_clock != withdraw_clock);
-}
-
 /** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
 if needed.
 @param[in]     size    size in bytes
......
@@ -170,10 +170,10 @@ mtr_t::release_block_at_savepoint(
        ut_a(slot->object == block);

-       buf_block_unfix(reinterpret_cast<buf_block_t*>(block));
-
        buf_page_release_latch(block, slot->type);

+       buf_block_unfix(reinterpret_cast<buf_block_t*>(block));
+
        slot->object = NULL;
 }
......
@@ -424,8 +424,6 @@ struct trx_undo_t {
        undo_no_t       top_undo_no;    /*!< undo number of the latest record */
        buf_block_t*    guess_block;    /*!< guess for the buffer block where
                                        the top page might reside */
-       ulint           withdraw_clock; /*!< the withdraw clock value of the
-                                       buffer pool when guess_block was stored */
        /*-----------------------------*/
        UT_LIST_NODE_T(trx_undo_t) undo_list;
                                        /*!< undo log objects in the rollback
......
@@ -233,8 +233,8 @@ static void memo_slot_release(mtr_memo_slot_t *slot)
   case MTR_MEMO_PAGE_SX_FIX:
   case MTR_MEMO_PAGE_X_FIX:
     buf_block_t *block= reinterpret_cast<buf_block_t*>(slot->object);
-    buf_block_unfix(block);
     buf_page_release_latch(block, slot->type);
+    buf_block_unfix(block);
     break;
   }
   slot->object= NULL;
@@ -276,8 +276,8 @@ struct ReleaseLatches {
   case MTR_MEMO_PAGE_SX_FIX:
   case MTR_MEMO_PAGE_X_FIX:
     buf_block_t *block= reinterpret_cast<buf_block_t*>(slot->object);
-    buf_block_unfix(block);
     buf_page_release_latch(block, slot->type);
+    buf_block_unfix(block);
     break;
   }
   slot->object= NULL;
......
@@ -1950,8 +1950,7 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
                buf_block_t* block = buf_page_get_gen(
                        page_id_t(undo->space, undo->last_page_no),
                        univ_page_size, RW_X_LATCH,
-                       buf_pool_is_obsolete(undo->withdraw_clock)
-                       ? NULL : undo->guess_block,
+                       undo->guess_block,
                        BUF_GET, __FILE__, __LINE__, &mtr, &err);

                ut_ad((err == DB_SUCCESS) == !!block);
@@ -1962,7 +1961,6 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
                if (ulint offset = trx_undo_page_report_rename(
                            trx, table, block, &mtr)) {
-                       undo->withdraw_clock = buf_withdraw_clock;
                        undo->empty = FALSE;
                        undo->top_page_no = undo->last_page_no;
                        undo->top_offset  = offset;
@@ -2084,8 +2082,7 @@ trx_undo_report_row_operation(
        undo_block = buf_page_get_gen(
                page_id_t(undo->space, page_no), univ_page_size, RW_X_LATCH,
-               buf_pool_is_obsolete(undo->withdraw_clock)
-               ? NULL : undo->guess_block, BUF_GET, __FILE__, __LINE__,
+               undo->guess_block, BUF_GET, __FILE__, __LINE__,
                &mtr, &err);

        buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE);
@@ -2138,14 +2135,13 @@ trx_undo_report_row_operation(
                mtr_commit(&mtr);
        } else {
                /* Success */
-               undo->withdraw_clock = buf_withdraw_clock;
+               undo->guess_block = undo_block;
                mtr_commit(&mtr);

                undo->empty = FALSE;
                undo->top_page_no = page_no;
                undo->top_offset  = offset;
                undo->top_undo_no = trx->undo_no++;
-               undo->guess_block = undo_block;

                trx->undo_rseg_space = rseg->space;
......
@@ -1338,7 +1338,6 @@ trx_undo_mem_create(
        undo->empty = TRUE;
        undo->top_page_no = page_no;
        undo->guess_block = NULL;
-       undo->withdraw_clock = 0;

        return(undo);
 }
......