MDEV-23456 fil_space_crypt_t::write_page0() is accessing an uninitialized page

buf_page_create() is invoked when a page is initialized, so the
previous contents of the page are ignored. In a few cases,
buf_page_get_gen() is called to fetch the page from the buffer pool;
it should take an x-latch on the page. If another thread is using the
block, or the block's I/O state differs from BUF_IO_NONE, then release
the mutex and check the state and buffer fix count again. For a
compressed page, use an existing free block from the LRU list to
create the new page. Retry fetching the compressed page if it is in
the flush list.
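
A minimal, self-contained sketch of that wait-and-retry protocol,
using standard C++ primitives instead of the InnoDB types (block_t,
wait_until_reusable and own_fix_count are illustrative names, not part
of this patch):

    #include <atomic>
    #include <mutex>
    #include <thread>

    struct block_t {
      std::mutex       mutex;          // stands in for the block mutex
      std::atomic<int> io_fix{0};      // 0 plays the role of BUF_IO_NONE
      std::atomic<int> buf_fix_count{0};
    };

    // Caller holds pool_mutex (the stand-in for buf_pool->mutex).
    // Returns only when no other thread I/O-fixes or buffer-fixes the
    // block, i.e. when it is safe to reinitialize its frame.
    void wait_until_reusable(block_t& block, std::mutex& pool_mutex,
                             int own_fix_count)
    {
      for (;;) {
        {
          std::lock_guard<std::mutex> block_guard(block.mutex);
          if (block.io_fix.load() == 0
              && block.buf_fix_count.load() == own_fix_count)
            return;                    // only our own fixes remain
        }
        // Another thread still uses the block: release the pool mutex,
        // let the I/O or the other user finish, then re-check.
        pool_mutex.unlock();
        std::this_thread::yield();
        pool_mutex.lock();
      }
    }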

fseg_create(), fseg_create_general(): Introduce a block parameter for
the page where the segment header is placed. It is used to avoid
taking a repeated x-latch on the same page.
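
The calling pattern after this change, taken from the
buf_dblwr_create() hunk below: the caller fetches and x-latches the
page once and hands the block to fseg_create(), instead of passing a
page number and having fseg_create() latch the page again.

    trx_sys_block = buf_page_get(
        page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
        page_size_t(srv_page_size, srv_page_size, 0), RW_X_LATCH,
        &mtr);

    block2 = fseg_create(TRX_SYS_SPACE,
                         TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG,
                         &mtr, trx_sys_block);

Passing block == NULL keeps the old behaviour of page == 0: a new page
is allocated and becomes the first page of the created segment.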

Change the assertions to check whether the page is held with an SX
or X latch in all callee functions of buf_page_create().
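
For example, the relaxed check in xdes_set_bit() in the diff below
becomes:

    ut_ad(mtr_memo_contains_page(
            mtr, descr,
            MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));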

mtr_t::get_fix_count(): Return the number of buffer fixes on the
given block that were added by this mtr.

FindBlock is added to count the buffer fixes on the given block that
were acquired by the mini-transaction.
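
How buf_page_create() uses them, condensed from the hunk below (the
wait between dropping and re-taking the latches is elided here): the
block may be reused only when every remaining buffer fix on it belongs
to this mini-transaction.

    buf_block_fix(block);
    /* fixes added on this block by our own mtr, plus the one above */
    const int32_t num_fix_count = mtr->get_fix_count(block) + 1;

    buf_page_mutex_enter(block);
    while (buf_block_get_io_fix(block) != BUF_IO_NONE
           || num_fix_count != block->page.buf_fix_count) {
        /* some other thread still uses the block: drop the latches,
        wait, then reacquire them and test again */
        buf_page_mutex_exit(block);
        buf_pool_mutex_exit(buf_pool);
        rw_lock_x_unlock(hash_lock);

        buf_pool_mutex_enter(buf_pool);
        rw_lock_x_lock(hash_lock);
        buf_page_mutex_enter(block);
    }
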
parent f99cace7
......@@ -1086,8 +1086,7 @@ btr_create(
if (type & DICT_IBUF) {
/* Allocate first the ibuf header page */
buf_block_t* ibuf_hdr_block = fseg_create(
space, 0,
IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr);
space, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr);
if (ibuf_hdr_block == NULL) {
return(FIL_NULL);
......@@ -1118,7 +1117,7 @@ btr_create(
flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
mtr);
} else {
block = fseg_create(space, 0,
block = fseg_create(space,
PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr);
if (block == NULL) {
......@@ -1127,8 +1126,9 @@ btr_create(
buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW);
if (!fseg_create(space, block->page.id.page_no(),
PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) {
if (!fseg_create(space,
PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr,
block)) {
/* Not enough space for new segment, free root
segment before return. */
btr_free_root(block, mtr,
......
......@@ -5564,14 +5564,13 @@ buf_page_create(
buf_frame_t* frame;
buf_block_t* block;
buf_block_t* free_block = NULL;
buf_pool_t* buf_pool = buf_pool_get(page_id);
buf_pool_t* buf_pool= buf_pool_get(page_id);
rw_lock_t* hash_lock;
ut_ad(mtr->is_active());
ut_ad(page_id.space() != 0 || !page_size.is_compressed());
loop:
free_block = buf_LRU_get_free_block(buf_pool);
buf_pool_mutex_enter(buf_pool);
hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
......@@ -5583,20 +5582,67 @@ buf_page_create(
&& buf_page_in_file(&block->page)
&& !buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
ut_d(block->page.file_page_was_freed = FALSE);
buf_page_state page_state = buf_block_get_state(block);
#ifdef BTR_CUR_HASH_ADAPT
bool drop_hash_entry =
(block->page.state == BUF_BLOCK_FILE_PAGE
&& block->index);
const dict_index_t *drop_hash_entry= NULL;
#endif
switch (page_state) {
default:
ut_ad(0);
break;
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
buf_block_init_low(free_block);
mutex_enter(&buf_pool->zip_mutex);
if (drop_hash_entry) {
mutex_enter(&block->mutex);
/* Avoid a hang if I/O is going on. Release
the buffer pool mutex and page hash lock
and wait for I/O to complete */
while (buf_block_get_io_fix(block) != BUF_IO_NONE) {
buf_block_fix(block);
mutex_exit(&block->mutex);
buf_page_mutex_enter(free_block);
if (buf_page_get_io_fix(&block->page) != BUF_IO_NONE) {
mutex_exit(&buf_pool->zip_mutex);
rw_lock_x_unlock(hash_lock);
buf_LRU_block_free_non_file_page(free_block);
buf_pool_mutex_exit(buf_pool);
buf_page_mutex_exit(free_block);
goto loop;
}
rw_lock_x_lock(&free_block->lock);
buf_relocate(&block->page, &free_block->page);
if (page_state == BUF_BLOCK_ZIP_DIRTY) {
ut_ad(block->page.in_flush_list);
ut_ad(block->page.oldest_modification > 0);
buf_flush_relocate_on_flush_list(
&block->page, &free_block->page);
} else {
ut_ad(block->page.oldest_modification == 0);
ut_ad(!block->page.in_flush_list);
#ifdef UNIV_DEBUG
UT_LIST_REMOVE(
buf_pool->zip_clean, &block->page);
#endif
}
free_block->page.state = BUF_BLOCK_FILE_PAGE;
mutex_exit(&buf_pool->zip_mutex);
free_block->lock_hash_val = lock_rec_hash(
page_id.space(), page_id.page_no());
buf_unzip_LRU_add_block(free_block, false);
buf_page_free_descriptor(&block->page);
block = free_block;
buf_block_fix(block);
buf_page_mutex_exit(free_block);
free_block = NULL;
break;
case BUF_BLOCK_FILE_PAGE:
buf_block_fix(block);
const int32_t num_fix_count =
mtr->get_fix_count(block) + 1;
buf_page_mutex_enter(block);
while (buf_block_get_io_fix(block) != BUF_IO_NONE
|| (num_fix_count
!= block->page.buf_fix_count)) {
buf_page_mutex_exit(block);
buf_pool_mutex_exit(buf_pool);
rw_lock_x_unlock(hash_lock);
......@@ -5604,33 +5650,39 @@ buf_page_create(
buf_pool_mutex_enter(buf_pool);
rw_lock_x_lock(hash_lock);
mutex_enter(&block->mutex);
buf_block_unfix(block);
buf_page_mutex_enter(block);
}
rw_lock_x_lock(&block->lock);
mutex_exit(&block->mutex);
}
buf_page_mutex_exit(block);
#ifdef BTR_CUR_HASH_ADAPT
drop_hash_entry = block->index;
#endif
break;
}
/* Page can be found in buf_pool */
buf_pool_mutex_exit(buf_pool);
rw_lock_x_unlock(hash_lock);
buf_block_free(free_block);
if (free_block) {
buf_block_free(free_block);
}
#ifdef BTR_CUR_HASH_ADAPT
if (drop_hash_entry) {
btr_search_drop_page_hash_index(block);
rw_lock_x_unlock(&block->lock);
}
#endif /* BTR_CUR_HASH_ADAPT */
if (!recv_recovery_is_on()) {
return buf_page_get_with_no_latch(page_id, page_size,
mtr);
#ifdef UNIV_DEBUG
if (!fsp_is_system_temporary(page_id.space())) {
rw_lock_s_lock_nowait(
&block->debug_latch,
__FILE__, __LINE__);
}
#endif /* UNIV_DEBUG */
mtr_memo_push(mtr, block, MTR_MEMO_PAGE_X_FIX);
mutex_exit(&recv_sys->mutex);
block = buf_page_get_with_no_latch(page_id, page_size, mtr);
mutex_enter(&recv_sys->mutex);
return block;
}
......@@ -5645,6 +5697,8 @@ buf_page_create(
buf_page_init(buf_pool, page_id, page_size, block);
rw_lock_x_lock(&block->lock);
rw_lock_x_unlock(hash_lock);
/* The block must be put to the LRU list */
......@@ -5662,7 +5716,6 @@ buf_page_create(
by IO-fixing and X-latching the block. */
buf_page_set_io_fix(&block->page, BUF_IO_READ);
rw_lock_x_lock(&block->lock);
buf_page_mutex_exit(block);
/* buf_pool->mutex may be released and reacquired by
......@@ -5684,12 +5737,11 @@ buf_page_create(
buf_unzip_LRU_add_block(block, FALSE);
buf_page_set_io_fix(&block->page, BUF_IO_NONE);
rw_lock_x_unlock(&block->lock);
}
buf_pool_mutex_exit(buf_pool);
mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
mtr_memo_push(mtr, block, MTR_MEMO_PAGE_X_FIX);
buf_page_set_accessed(&block->page);
......
......@@ -170,6 +170,7 @@ buf_dblwr_create()
{
buf_block_t* block2;
buf_block_t* new_block;
buf_block_t* trx_sys_block;
byte* doublewrite;
byte* fseg_header;
ulint page_no;
......@@ -209,9 +210,14 @@ buf_dblwr_create()
}
}
block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO,
trx_sys_block = buf_page_get(
page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
page_size_t(srv_page_size, srv_page_size, 0), RW_X_LATCH,
&mtr);
block2 = fseg_create(TRX_SYS_SPACE,
TRX_SYS_DOUBLEWRITE
+ TRX_SYS_DOUBLEWRITE_FSEG, &mtr);
+ TRX_SYS_DOUBLEWRITE_FSEG, &mtr, trx_sys_block);
if (block2 == NULL) {
too_small:
......
......@@ -179,7 +179,7 @@ dict_hdr_create(
/* Create the dictionary header file block in a new, allocated file
segment in the system tablespace */
block = fseg_create(DICT_HDR_SPACE, 0,
block = fseg_create(DICT_HDR_SPACE,
DICT_HDR + DICT_HDR_FSEG_HEADER, mtr);
ut_a(DICT_HDR_PAGE_NO == block->page.id.page_no());
......
......@@ -117,7 +117,6 @@ to minimize file space fragmentation.
@param[in] direction if the new page is needed because of
an index page split, and records are inserted there in order, into which
direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
which the page should be initialized. If init_mtr != mtr, but the page is
......@@ -136,7 +135,6 @@ fseg_alloc_free_page_low(
fseg_inode_t* seg_inode,
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr
#ifdef UNIV_DEBUG
......@@ -224,7 +222,9 @@ xdes_set_bit(
ulint bit_index;
ulint descr_byte;
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page(
mtr, descr,
MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
ut_ad((bit == XDES_FREE_BIT) || (bit == XDES_CLEAN_BIT));
ut_ad(offset < FSP_EXTENT_SIZE);
......@@ -351,7 +351,9 @@ xdes_set_state(
ut_ad(descr && mtr);
ut_ad(state >= XDES_FREE);
ut_ad(state <= XDES_FSEG);
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page(
mtr, descr,
MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
mlog_write_ulint(descr + XDES_STATE, state, MLOG_4BYTES, mtr);
}
......@@ -388,7 +390,9 @@ xdes_init(
ulint i;
ut_ad(descr && mtr);
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page(
mtr, descr,
MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
ut_ad((XDES_SIZE - XDES_BITMAP) % 4 == 0);
for (i = XDES_BITMAP; i < XDES_SIZE; i += 4) {
......@@ -423,7 +427,8 @@ xdes_get_descriptor_with_space_hdr(
ulint descr_page_no;
page_t* descr_page;
ut_ad(mtr_memo_contains(mtr, space, MTR_MEMO_SPACE_X_LOCK));
ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_SX_FIX)
|| mtr_memo_contains_page(mtr, sp_header, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_offset(sp_header) == FSP_HEADER_OFFSET);
/* Read free limit and space size */
limit = mach_read_from_4(sp_header + FSP_FREE_LIMIT);
......@@ -701,7 +706,6 @@ fsp_header_init(ulint space_id, ulint size, mtr_t* mtr)
const page_size_t page_size(space->flags);
block = buf_page_create(page_id, page_size, mtr);
buf_page_get(page_id, page_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
space->size_in_header = size;
......@@ -1100,9 +1104,6 @@ fsp_fill_free_list(
block = buf_page_create(
page_id, page_size, mtr);
buf_page_get(
page_id, page_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fsp_init_file_page(space, block, mtr);
......@@ -1120,9 +1121,6 @@ fsp_fill_free_list(
block = buf_page_create(
page_id, page_size, mtr);
buf_page_get(
page_id, page_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fsp_init_file_page(space, block, mtr);
......@@ -1268,7 +1266,6 @@ x-latched only by mtr, and freed in mtr in that case.
@param[in,out] space tablespace
@param[in] offset page number of the allocated page
@param[in] page_size page size of the allocated page
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction of the allocation
@param[in,out] init_mtr mini-transaction for initializing the page
@return block, initialized if init_mtr==mtr
......@@ -1279,7 +1276,6 @@ fsp_page_create(
fil_space_t* space,
page_no_t offset,
const page_size_t& page_size,
rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr)
{
......@@ -1289,26 +1285,10 @@ fsp_page_create(
page_size, init_mtr);
ut_d(bool latched = mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(rw_latch == RW_X_LATCH || rw_latch == RW_SX_LATCH);
/* Mimic buf_page_get(), but avoid the buf_pool->page_hash lookup. */
if (rw_latch == RW_X_LATCH) {
rw_lock_x_lock(&block->lock);
} else {
rw_lock_sx_lock(&block->lock);
}
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
mtr_memo_push(init_mtr, block, rw_latch == RW_X_LATCH
? MTR_MEMO_PAGE_X_FIX : MTR_MEMO_PAGE_SX_FIX);
MTR_MEMO_PAGE_X_FIX));
if (init_mtr == mtr
|| (rw_latch == RW_X_LATCH
? rw_lock_get_x_lock_count(&block->lock) == 1
: rw_lock_get_sx_lock_count(&block->lock) == 1)) {
|| rw_lock_get_x_lock_count(&block->lock) == 1) {
/* Initialize the page, unless it was already
SX-latched in mtr. (In this case, we would want to
......@@ -1325,7 +1305,6 @@ The page is marked as used.
@param[in,out] space tablespace
@param[in] page_size page size
@param[in] hint hint of which page would be desirable
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mini-transaction in which the page should be
initialized (may be the same as mtr)
......@@ -1339,7 +1318,6 @@ fsp_alloc_free_page(
fil_space_t* space,
const page_size_t& page_size,
ulint hint,
rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr)
{
......@@ -1431,8 +1409,7 @@ fsp_alloc_free_page(
}
fsp_alloc_from_free_frag(header, descr, free, mtr);
return(fsp_page_create(space, page_no, page_size, rw_latch,
mtr, init_mtr));
return(fsp_page_create(space, page_no, page_size, mtr, init_mtr));
}
/** Frees a single page of a space.
......@@ -1669,8 +1646,7 @@ fsp_alloc_seg_inode_page(
const page_size_t page_size(space->flags);
block = fsp_alloc_free_page(
space, page_size, 0, RW_SX_LATCH, mtr, mtr);
block = fsp_alloc_free_page(space, page_size, 0, mtr, mtr);
if (block == NULL) {
......@@ -1678,7 +1654,7 @@ fsp_alloc_seg_inode_page(
}
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1);
ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1);
mlog_write_ulint(block->frame + FIL_PAGE_TYPE, FIL_PAGE_INODE,
MLOG_2BYTES, mtr);
......@@ -1987,32 +1963,36 @@ fseg_get_n_frag_pages(
return(count);
}
/**********************************************************************//**
Creates a new segment.
/** Creates a new segment.
@param[in] space_id space_id
@param[in] byte_offset byte offset of the created segment
header on the page
@param[in] has_done_reservation TRUE if the caller has already
done the reservation for the pages
with fsp_reserve_free_extents
(at least 2 extents: one for
the inode and the other for the
segment) then there is no need to do
the check for this individual
operation
@param[in,out] mtr mini-transaction
@param[in] block block where the segment header is
placed. If it is null then new page
will be allocated and it will belong
to the created segment
@return the block where the segment header is placed, x-latched, NULL
if could not create segment because of lack of space */
buf_block_t*
fseg_create_general(
/*================*/
ulint space_id,/*!< in: space id */
ulint page, /*!< in: page where the segment header is placed: if
this is != 0, the page must belong to another segment,
if this is 0, a new page will be allocated and it
will belong to the created segment */
ulint byte_offset, /*!< in: byte offset of the created segment header
on the page */
ibool has_done_reservation, /*!< in: TRUE if the caller has already
done the reservation for the pages with
fsp_reserve_free_extents (at least 2 extents: one for
the inode and the other for the segment) then there is
no need to do the check for this individual
operation */
mtr_t* mtr) /*!< in/out: mini-transaction */
ulint space_id,
ulint byte_offset,
ibool has_done_reservation,
mtr_t* mtr,
buf_block_t* block)
{
fsp_header_t* space_header;
fseg_inode_t* inode;
ib_id_t seg_id;
buf_block_t* block = 0; /* remove warning */
fseg_header_t* header = 0; /* remove warning */
ulint n_reserved;
ulint i;
......@@ -2027,14 +2007,12 @@ fseg_create_general(
const page_size_t page_size(space->flags);
ut_d(space->modify_check(*mtr));
if (page != 0) {
block = buf_page_get(page_id_t(space_id, page), page_size,
RW_SX_LATCH, mtr);
if (block) {
header = byte_offset + buf_block_get_frame(block);
const ulint type = space_id == TRX_SYS_SPACE
&& page == TRX_SYS_PAGE_NO
const ulint type =
(block->page.id.space() == TRX_SYS_SPACE
&& block->page.id.page_no() == TRX_SYS_PAGE_NO)
? FIL_PAGE_TYPE_TRX_SYS
: FIL_PAGE_TYPE_SYS;
......@@ -2075,9 +2053,9 @@ fseg_create_general(
fseg_set_nth_frag_page_no(inode, i, FIL_NULL, mtr);
}
if (page == 0) {
if (!block) {
block = fseg_alloc_free_page_low(space, page_size,
inode, 0, FSP_UP, RW_SX_LATCH,
inode, 0, FSP_UP,
mtr, mtr
#ifdef UNIV_DEBUG
, has_done_reservation
......@@ -2095,7 +2073,7 @@ fseg_create_general(
goto funct_exit;
}
ut_ad(rw_lock_get_sx_lock_count(&block->lock) == 1);
ut_ad(rw_lock_get_x_lock_count(&block->lock) == 1);
header = byte_offset + buf_block_get_frame(block);
mlog_write_ulint(buf_block_get_frame(block) + FIL_PAGE_TYPE,
......@@ -2120,23 +2098,25 @@ fseg_create_general(
DBUG_RETURN(block);
}
/**********************************************************************//**
Creates a new segment.
/** Creates a new segment.
@param[in] space space id
@param[in] byte_offset byte offset of the created segment header
on the page
@param[in,out] mtr mini-transaction
@param[in,out] block block where segment header is placed;
If it is null then new page will be
allocated and it will belong to
the created segment
@return the block where the segment header is placed, x-latched, NULL
if could not create segment because of lack of space */
buf_block_t*
fseg_create(
/*========*/
ulint space, /*!< in: space id */
ulint page, /*!< in: page where the segment header is placed: if
this is != 0, the page must belong to another segment,
if this is 0, a new page will be allocated and it
will belong to the created segment */
ulint byte_offset, /*!< in: byte offset of the created segment header
on the page */
mtr_t* mtr) /*!< in/out: mini-transaction */
ulint space,
ulint byte_offset,
mtr_t* mtr,
buf_block_t* block)
{
return(fseg_create_general(space, page, byte_offset, FALSE, mtr));
return(fseg_create_general(space, byte_offset, FALSE, mtr, block));
}
/**********************************************************************//**
......@@ -2334,7 +2314,6 @@ minimize file space fragmentation.
@param[in] direction if the new page is needed because of
an index page split, and records are inserted there in order, into which
direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
which the page should be initialized. If init_mtr != mtr, but the page is
......@@ -2353,7 +2332,6 @@ fseg_alloc_free_page_low(
fseg_inode_t* seg_inode,
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr
#ifdef UNIV_DEBUG
......@@ -2496,7 +2474,7 @@ fseg_alloc_free_page_low(
/* 6. We allocate an individual page from the space
===================================================*/
buf_block_t* block = fsp_alloc_free_page(
space, page_size, hint, rw_latch, mtr, init_mtr);
space, page_size, hint, mtr, init_mtr);
ut_ad(!has_done_reservation || block != NULL);
......@@ -2577,8 +2555,7 @@ fseg_alloc_free_page_low(
fseg_mark_page_used(seg_inode, ret_page, ret_descr, mtr);
}
return(fsp_page_create(space, ret_page, page_size, rw_latch,
mtr, init_mtr));
return(fsp_page_create(space, ret_page, page_size, mtr, init_mtr));
}
/**********************************************************************//**
......@@ -2633,7 +2610,7 @@ fseg_alloc_free_page_general(
block = fseg_alloc_free_page_low(space, page_size,
inode, hint, direction,
RW_X_LATCH, mtr, init_mtr
mtr, init_mtr
#ifdef UNIV_DEBUG
, has_done_reservation
#endif /* UNIV_DEBUG */
......@@ -3388,7 +3365,7 @@ fseg_print_low(
ulint page_no;
ib_id_t seg_id;
ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_X_FIX));
space = page_get_space_id(page_align(inode));
page_no = page_get_page_no(page_align(inode));
......
......@@ -407,42 +407,52 @@ fsp_header_inc_size(
ulint space_id, /*!< in: space id */
ulint size_inc, /*!< in: size increment in pages */
mtr_t* mtr); /*!< in/out: mini-transaction */
/**********************************************************************//**
Creates a new segment.
/** Creates a new segment.
@param[in] space space id
@param[in] byte_offset byte offset of the created segment header
on the page
@param[in,out] mtr mini-transaction
@param[in,out] block block where segment header is placed;
If it is null then new page will be
allocated and it will belong to
the created segment
@return the block where the segment header is placed, x-latched, NULL
if could not create segment because of lack of space */
buf_block_t*
fseg_create(
/*========*/
ulint space_id,/*!< in: space id */
ulint page, /*!< in: page where the segment header is placed: if
this is != 0, the page must belong to another segment,
if this is 0, a new page will be allocated and it
will belong to the created segment */
ulint byte_offset, /*!< in: byte offset of the created segment header
on the page */
mtr_t* mtr); /*!< in/out: mini-transaction */
/**********************************************************************//**
Creates a new segment.
ulint space,
ulint byte_offset,
mtr_t* mtr,
buf_block_t* block=NULL);
/** Creates a new segment.
@param[in] space_id space_id
@param[in] byte_offset byte offset of the created segment
header on the page
@param[in] has_done_reservation TRUE if the caller has already
done the reservation for the pages
with fsp_reserve_free_extents
(at least 2 extents: one for
the inode and the other for the
segment) then there is no need to do
the check for this individual
operation
@param[in,out] mtr mini-transaction
@param[in] block block where the segment header is
placed. If it is null then new page
will be allocated and it will belong
to the created segment
@return the block where the segment header is placed, x-latched, NULL
if could not create segment because of lack of space */
buf_block_t*
fseg_create_general(
/*================*/
ulint space_id,/*!< in: space id */
ulint page, /*!< in: page where the segment header is placed: if
this is != 0, the page must belong to another segment,
if this is 0, a new page will be allocated and it
will belong to the created segment */
ulint byte_offset, /*!< in: byte offset of the created segment header
on the page */
ibool has_done_reservation, /*!< in: TRUE if the caller has already
done the reservation for the pages with
fsp_reserve_free_extents (at least 2 extents: one for
the inode and the other for the segment) then there is
no need to do the check for this individual
operation */
mtr_t* mtr); /*!< in/out: mini-transaction */
ulint space_id,
ulint byte_offset,
ibool has_done_reservation,
mtr_t* mtr,
buf_block_t* block);
/**********************************************************************//**
Calculates the number of pages reserved by a segment, and how many pages are
currently used.
......
......@@ -434,6 +434,10 @@ struct mtr_t {
static inline bool is_block_dirtied(const buf_block_t* block)
MY_ATTRIBUTE((warn_unused_result));
/** Get the buffer fix count for the block added by this mtr.
@param[in] block block to be checked
@return number of buffer count added by this mtr */
int32_t get_fix_count(buf_block_t *block);
private:
/** Look up the system tablespace. */
void lookup_sys_space();
......
......@@ -2362,7 +2362,6 @@ static buf_block_t* recv_recovery_create_page_low(const page_id_t page_id,
{
i.created = true;
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
mtr.x_latch_at_savepoint(0, block);
recv_recover_page(block, mtr, recv_addr, i.lsn);
ut_ad(mtr.has_committed());
}
......
......@@ -308,6 +308,32 @@ struct DebugCheck {
};
#endif
/** Find buffer fix count of the given block acquired by the
mini-transaction */
struct FindBlock
{
int32_t num_fix;
buf_block_t *block;
FindBlock(buf_block_t *block_buf): num_fix(0), block(block_buf) {}
bool operator()(const mtr_memo_slot_t* slot)
{
if (slot->object != NULL)
{
buf_block_t *mtr_block= reinterpret_cast<buf_block_t*>(slot->object);
if (mtr_block == block)
num_fix++;
}
return true;
}
int32_t get_num_fix()
{
return num_fix;
}
};
/** Release a resource acquired by the mini-transaction. */
struct ReleaseBlocks {
/** Release specific object */
......@@ -804,6 +830,15 @@ mtr_t::release_free_extents(ulint n_reserved)
space->release_free_extents(n_reserved);
}
int32_t mtr_t::get_fix_count(buf_block_t *block)
{
struct FindBlock find_block(block);
Iterate<FindBlock> iteration(find_block);
if (m_memo.for_each_block(iteration))
return iteration.functor.get_num_fix();
return 0;
}
#ifdef UNIV_DEBUG
/** Check if memo contains the given item.
@return true if contains */
......
......@@ -57,7 +57,7 @@ trx_rseg_header_create(
MTR_MEMO_SPACE_X_LOCK));
/* Allocate a new file segment for the rollback segment */
block = fseg_create(space, 0, TRX_RSEG + TRX_RSEG_FSEG_HEADER, mtr);
block = fseg_create(space, TRX_RSEG + TRX_RSEG_FSEG_HEADER, mtr);
if (block == NULL) {
/* No space left */
......
......@@ -422,7 +422,7 @@ trx_sysf_create(
mtr_x_lock_space(TRX_SYS_SPACE, mtr);
/* Create the trx sys file block in a new allocated file segment */
block = fseg_create(TRX_SYS_SPACE, 0, TRX_SYS + TRX_SYS_FSEG_HEADER,
block = fseg_create(TRX_SYS_SPACE, TRX_SYS + TRX_SYS_FSEG_HEADER,
mtr);
buf_block_dbg_add_level(block, SYNC_TRX_SYS_HEADER);
......
......@@ -446,9 +446,9 @@ trx_undo_seg_create(
}
/* Allocate a new file segment for the undo log */
block = fseg_create_general(space, 0,
block = fseg_create_general(space,
TRX_UNDO_SEG_HDR
+ TRX_UNDO_FSEG_HEADER, TRUE, mtr);
+ TRX_UNDO_FSEG_HEADER, TRUE, mtr, NULL);
fil_space_release_free_extents(space, n_reserved);
......