Commit 17a7bafe authored by Marko Mäkelä's avatar Marko Mäkelä

MDEV-22110 preparation: Remove mtr_memo_contains macros

Let us invoke the debug member functions of mtr_t directly.

mtr_t::memo_contains(): Change the parameter type to
const rw_lock_t&. This function cannot be invoked on
buf_block_t::lock.

The function mtr_t::memo_contains_flagged() is intended to be invoked
on buf_block_t* or rw_lock_t*, and it along with
mtr_t::memo_contains_page_flagged() are the way to check whether
a buffer pool page has been latched within a mini-transaction.
parent d6f8c484
......@@ -287,10 +287,9 @@ btr_height_get(
buf_block_t* root_block;
ut_ad(srv_read_only_mode
|| mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_S_LOCK
| MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
|| mtr->memo_contains_flagged(&index->lock, MTR_MEMO_S_LOCK
| MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
/* S latches the page */
root_block = btr_root_block_get(index, RW_S_LATCH, mtr);
......@@ -438,7 +437,7 @@ btr_page_create(
ulint level, /*!< in: the B-tree level of the page */
mtr_t* mtr) /*!< in: mtr */
{
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
byte *index_id= my_assume_aligned<2>(PAGE_HEADER + PAGE_INDEX_ID +
block->frame);
......@@ -607,8 +606,7 @@ btr_get_size(
ulint n=0;
ut_ad(srv_read_only_mode
|| mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_S_LOCK));
|| mtr->memo_contains(index->lock, MTR_MEMO_S_LOCK));
ut_ad(flag == BTR_N_LEAF_PAGES || flag == BTR_TOTAL_SIZE);
if (index->page == FIL_NULL
......@@ -654,9 +652,7 @@ btr_get_size_and_reserved(
{
ulint dummy;
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_S_LOCK));
ut_ad(mtr->memo_contains(index->lock, MTR_MEMO_S_LOCK));
ut_a(flag == BTR_N_LEAF_PAGES || flag == BTR_TOTAL_SIZE);
if (index->page == FIL_NULL
......@@ -697,7 +693,7 @@ btr_page_free_for_ibuf(
buf_block_t* block, /*!< in: block to be freed, x-latched */
mtr_t* mtr) /*!< in: mtr */
{
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, mtr);
......@@ -715,7 +711,7 @@ btr_page_free_for_ibuf(
void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
bool blob)
{
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
#ifdef BTR_CUR_HASH_ADAPT
if (block->index && !block->index->freed()) {
ut_ad(!blob);
......@@ -752,7 +748,7 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
/* The page was marked free in the allocation bitmap, but it
should remain exclusively latched until mtr_t::commit() or until it
is explicitly freed from the mini-transaction. */
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
/* MDEV-15528 FIXME: Zero out the page after the redo log for
this mini-transaction has been durably written.
......@@ -841,9 +837,8 @@ btr_page_get_father_node_ptr_func(
ut_ad(!dict_index_is_spatial(index));
ut_ad(srv_read_only_mode
|| mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
|| mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(dict_index_get_page(index) != page_no);
......@@ -968,8 +963,8 @@ before mtr.commit().
@param[in] invalidate whether to invalidate PAGE_INDEX_ID */
static void btr_free_root(buf_block_t *block, mtr_t *mtr, bool invalidate)
{
ut_ad(mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->is_named_space(block->page.id().space()));
btr_search_drop_page_hash_index(block);
......@@ -1384,7 +1379,7 @@ static void btr_page_reorganize_low(page_cur_t *cursor, dict_index_t *index,
buf_block_t *const block= cursor->block;
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!is_buf_block_get_page_zip(block));
btr_assert_not_corrupted(block, index);
ut_ad(fil_page_index_page_check(block->frame));
......@@ -1693,7 +1688,7 @@ btr_page_empty(
ulint level,
mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_zip == buf_block_get_page_zip(block));
ut_ad(!index->is_dummy);
ut_ad(index->table->space->id == block->page.id().space());
......@@ -1895,10 +1890,9 @@ btr_root_raise_and_insert(
ut_a(dict_index_get_page(index) == root->page.id().page_no());
#endif /* UNIV_BTR_DEBUG */
ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(mtr_memo_contains(mtr, root, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(root, MTR_MEMO_PAGE_X_FIX));
/* Allocate a new page to the tree. Root splitting is done by first
moving the root records to the new page, emptying the root, putting
......@@ -2490,8 +2484,8 @@ btr_attach_half_pages(
buf_block_t* lower_block;
buf_block_t* upper_block;
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr_memo_contains(mtr, new_block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(new_block, MTR_MEMO_PAGE_X_FIX));
/* Create a memory heap where the data tuple is stored */
heap = mem_heap_create(1024);
......@@ -2653,10 +2647,9 @@ btr_insert_into_right_sibling(
page_t* page = buf_block_get_frame(block);
const uint32_t next_page_no = btr_page_get_next(page);
ut_ad(mtr_memo_contains_flagged(
mtr, dict_index_get_lock(cursor->index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(&cursor->index->lock,
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(heap);
if (next_page_no == FIL_NULL || !page_rec_is_supremum(
......@@ -2815,9 +2808,8 @@ btr_page_split_and_insert(
mem_heap_empty(*heap);
*offsets = NULL;
ut_ad(mtr_memo_contains_flagged(mtr,
dict_index_get_lock(cursor->index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(&cursor->index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(!dict_index_is_online_ddl(cursor->index)
|| (flags & BTR_CREATE_FLAG)
|| dict_index_is_clust(cursor->index));
......@@ -2828,7 +2820,7 @@ btr_page_split_and_insert(
page = buf_block_get_frame(block);
page_zip = buf_block_get_page_zip(block);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!page_is_empty(page));
/* try to insert to the next page if possible before split */
......@@ -3177,7 +3169,7 @@ btr_page_split_and_insert(
void btr_level_list_remove(const buf_block_t& block, const dict_index_t& index,
mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, &block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(&block, MTR_MEMO_PAGE_X_FIX));
ut_ad(block.zip_size() == index.table->space->zip_size());
ut_ad(index.table->space->id == block.page.id().space());
/* Get the previous and next page numbers of page */
......@@ -3246,7 +3238,7 @@ btr_lift_page_up(
buf_block_t* block_orig = block;
ut_ad(!page_has_siblings(page));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
page_level = btr_page_get_level(page);
root_page_no = dict_index_get_page(index);
......@@ -3313,8 +3305,8 @@ btr_lift_page_up(
page_level = btr_page_get_level(page);
ut_ad(!page_has_siblings(page));
ut_ad(mtr_memo_contains(
mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block,
MTR_MEMO_PAGE_X_FIX));
father_block = blocks[0];
father_page_zip = buf_block_get_page_zip(father_block);
......@@ -3450,18 +3442,9 @@ btr_compress(
btr_assert_not_corrupted(block, index);
#ifdef UNIV_DEBUG
if (dict_index_is_spatial(index)) {
ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
} else {
ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
}
#endif /* UNIV_DEBUG */
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
MONITOR_INC(MONITOR_INDEX_MERGE_ATTEMPTS);
......@@ -3936,7 +3919,7 @@ btr_discard_only_page_on_level(
ut_a(!page_has_siblings(page));
ut_ad(fil_page_index_page_check(page));
ut_ad(block->page.id().space() == index->table->space->id);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
btr_search_drop_page_hash_index(block);
if (dict_index_is_spatial(index)) {
......@@ -4046,10 +4029,9 @@ btr_discard_page(
ut_ad(dict_index_get_page(index) != block->page.id().page_no());
ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
MONITOR_INC(MONITOR_INDEX_DISCARD);
......@@ -4217,7 +4199,7 @@ btr_print_recursive(
ulint i = 0;
mtr_t mtr2;
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_SX_FIX));
ib::info() << "NODE ON LEVEL " << btr_page_get_level(page)
<< " page " << block->page.id;
......@@ -4311,7 +4293,7 @@ btr_check_node_ptr(
btr_cur_t cursor;
page_t* page = buf_block_get_frame(block);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
if (dict_index_get_page(index) == block->page.id().page_no()) {
......
This diff is collapsed.
......@@ -117,13 +117,11 @@ btr_pcur_store_position(
/* For spatial index, when we do positioning on parent
buffer if necessary, it might not hold latches, but the
tree must be locked to prevent change on the page */
ut_ad(mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_S_FIX
| MTR_MEMO_PAGE_X_FIX)
|| (dict_index_is_spatial(index)
&& mtr_memo_contains_flagged(
mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK)));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_S_FIX
| MTR_MEMO_PAGE_X_FIX)
|| (index->is_spatial()
&& mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK)));
cursor->old_stored = true;
......
......@@ -387,7 +387,7 @@ dict_process_sys_tables_rec_and_mtr_commit(
ut_a(!rec_get_deleted_flag(rec, 0));
ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_S_FIX));
/* Get the table name */
table_name_t table_name(mem_heap_strdupl(heap, field, len));
......
......@@ -1020,8 +1020,7 @@ dict_stats_analyze_index_level(
DEBUG_PRINTF(" %s(table=%s, index=%s, level=" ULINTPF ")\n",
__func__, index->table->name, index->name, level);
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains(index->lock, MTR_MEMO_SX_LOCK));
n_uniq = dict_index_get_n_unique(index);
......@@ -1653,8 +1652,7 @@ dict_stats_analyze_index_for_n_prefix(
n_prefix, n_diff_data->n_diff_on_level);
#endif
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains(index->lock, MTR_MEMO_SX_LOCK));
/* Position pcur on the leftmost record on the leftmost page
on the desired level. */
......
......@@ -147,7 +147,7 @@ template<bool free>
inline void xdes_set_free(const buf_block_t &block, xdes_t *descr,
ulint offset, mtr_t *mtr)
{
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(&block, MTR_MEMO_PAGE_SX_FIX));
ut_ad(offset < FSP_EXTENT_SIZE);
ut_ad(page_align(descr) == block.frame);
compile_time_assert(XDES_BITS_PER_PAGE == 2);
......@@ -218,7 +218,7 @@ inline void xdes_set_state(const buf_block_t &block, xdes_t *descr,
ut_ad(descr && mtr);
ut_ad(state >= XDES_FREE);
ut_ad(state <= XDES_FSEG);
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(&block, MTR_MEMO_PAGE_SX_FIX));
ut_ad(page_align(descr) == block.frame);
ut_ad(mach_read_from_4(descr + XDES_STATE) <= XDES_FSEG);
mtr->write<1>(block, XDES_STATE + 3 + descr, state);
......@@ -245,7 +245,7 @@ xdes_get_state(
Inits an extent descriptor to the free and clean state. */
inline void xdes_init(const buf_block_t &block, xdes_t *descr, mtr_t *mtr)
{
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(&block, MTR_MEMO_PAGE_SX_FIX));
mtr->memset(&block, uint16_t(descr - block.frame) + XDES_BITMAP,
XDES_SIZE - XDES_BITMAP, 0xff);
xdes_set_state(block, descr, XDES_FREE, mtr);
......@@ -319,8 +319,8 @@ xdes_get_descriptor_with_space_hdr(
ulint limit;
ulint size;
ulint descr_page_no;
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(mtr, header, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains(space->latch, MTR_MEMO_X_LOCK));
ut_ad(mtr->memo_contains_flagged(header, MTR_MEMO_PAGE_SX_FIX));
/* Read free limit and space size */
limit = mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT
+ header->frame);
......@@ -403,7 +403,7 @@ xdes_get_descriptor_const(
page_no_t offset,
mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_S_LOCK));
ut_ad(mtr->memo_contains(space->latch, MTR_MEMO_S_LOCK));
ut_ad(offset < space->free_limit);
ut_ad(offset < space->size_in_header);
......@@ -444,7 +444,7 @@ xdes_lst_get_descriptor(
buf_block_t** block,
mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
ut_ad(mtr->memo_contains(space->latch, MTR_MEMO_X_LOCK));
return fut_get_ptr(space->id, space->zip_size(),
lst_node, RW_SX_LATCH, mtr, block)
- XDES_FLST_NODE;
......@@ -1293,7 +1293,7 @@ static void fsp_free_page(fil_space_t* space, page_no_t offset, mtr_t* mtr)
@param[in,out] mtr mini-transaction */
static void fsp_free_extent(fil_space_t* space, page_no_t offset, mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
ut_ad(mtr->memo_contains(space->latch, MTR_MEMO_X_LOCK));
buf_block_t *block= fsp_get_header(space, mtr);
buf_block_t *xdes= 0;
......@@ -1584,7 +1584,7 @@ inline void fseg_set_nth_frag_page_no(fseg_inode_t *inode, buf_block_t *iblock,
ulint n, ulint page_no, mtr_t *mtr)
{
ut_ad(n < FSEG_FRAG_ARR_N_SLOTS);
ut_ad(mtr_memo_contains_page(mtr, inode, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(iblock, MTR_MEMO_PAGE_SX_FIX));
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
mtr->write<4>(*iblock, inode + FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE,
......
......@@ -90,12 +90,10 @@ static void flst_add_to_empty(buf_block_t *base, uint16_t boffset,
ut_ad(base != add || boffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(!mach_read_from_4(base->frame + boffset + FLST_LEN));
mtr->write<1>(*base, base->frame + boffset + (FLST_LEN + 3), 1U);
......@@ -132,15 +130,12 @@ static void flst_insert_after(buf_block_t *base, uint16_t boffset,
ut_ad(boffset < base->physical_size());
ut_ad(coffset < cur->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(cur, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
fil_addr_t next_addr= flst_get_next_addr(cur->frame + coffset);
......@@ -186,15 +181,12 @@ static void flst_insert_before(buf_block_t *base, uint16_t boffset,
ut_ad(boffset < base->physical_size());
ut_ad(coffset < cur->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(cur, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
fil_addr_t prev_addr= flst_get_prev_addr(cur->frame + coffset);
......@@ -247,12 +239,10 @@ void flst_add_last(buf_block_t *base, uint16_t boffset,
ut_ad(base != add || boffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
if (!flst_get_len(base->frame + boffset))
flst_add_to_empty(base, boffset, add, aoffset, mtr);
......@@ -282,12 +272,10 @@ void flst_add_first(buf_block_t *base, uint16_t boffset,
ut_ad(base != add || boffset != aoffset);
ut_ad(boffset < base->physical_size());
ut_ad(aoffset < add->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, add->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(add, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
if (!flst_get_len(base->frame + boffset))
flst_add_to_empty(base, boffset, add, aoffset, mtr);
......@@ -316,12 +304,10 @@ void flst_remove(buf_block_t *base, uint16_t boffset,
{
ut_ad(boffset < base->physical_size());
ut_ad(coffset < cur->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr_memo_contains_page_flagged(mtr, cur->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(cur, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
const fil_addr_t prev_addr= flst_get_prev_addr(cur->frame + coffset);
const fil_addr_t next_addr= flst_get_next_addr(cur->frame + coffset);
......@@ -364,9 +350,8 @@ void flst_remove(buf_block_t *base, uint16_t boffset,
void flst_validate(const buf_block_t *base, uint16_t boffset, mtr_t *mtr)
{
ut_ad(boffset < base->physical_size());
ut_ad(mtr_memo_contains_page_flagged(mtr, base->frame,
MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
ut_ad(mtr->memo_contains_flagged(base, MTR_MEMO_PAGE_X_FIX |
MTR_MEMO_PAGE_SX_FIX));
/* We use two mini-transaction handles: the first is used to lock
the base node, and prevent other threads from modifying the list.
......
......@@ -915,8 +915,8 @@ rtr_page_split_and_insert(
mem_heap_empty(*heap);
*offsets = NULL;
ut_ad(mtr_memo_contains_flagged(mtr, dict_index_get_lock(cursor->index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(&cursor->index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(!dict_index_is_online_ddl(cursor->index)
|| (flags & BTR_CREATE_FLAG)
|| dict_index_is_clust(cursor->index));
......@@ -928,7 +928,7 @@ rtr_page_split_and_insert(
page_zip = buf_block_get_page_zip(block);
current_ssn = page_get_ssn_id(page);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_get_n_recs(page) >= 1);
page_no = block->page.id().page_no();
......
......@@ -139,10 +139,10 @@ rtr_pcur_getnext_from_path(
|| latch_mode & BTR_MODIFY_LEAF);
mtr_s_lock_index(index, mtr);
} else {
ut_ad(mtr_memo_contains_flagged(mtr, &index->lock,
MTR_MEMO_SX_LOCK
| MTR_MEMO_S_LOCK
| MTR_MEMO_X_LOCK));
ut_ad(mtr->memo_contains_flagged(&index->lock,
MTR_MEMO_SX_LOCK
| MTR_MEMO_S_LOCK
| MTR_MEMO_X_LOCK));
}
const ulint zip_size = index->table->space->zip_size();
......@@ -599,15 +599,14 @@ rtr_pcur_open_low(
n_fields = dtuple_get_n_fields(tuple);
if (latch_mode & BTR_ALREADY_S_LATCHED) {
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_S_LOCK));
ut_ad(mtr->memo_contains(index->lock, MTR_MEMO_S_LOCK));
tree_latched = true;
}
if (latch_mode & BTR_MODIFY_TREE) {
ut_ad(mtr_memo_contains_flagged(mtr, &index->lock,
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(&index->lock,
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
tree_latched = true;
}
......@@ -707,11 +706,8 @@ static void rtr_get_father_node(
/* Try to optimally locate the parent node. Level should always
less than sea_cur->tree_height unless the root is splitting */
if (sea_cur && sea_cur->tree_height > level) {
ut_ad(mtr_memo_contains_flagged(mtr,
dict_index_get_lock(index),
MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ret = rtr_cur_restore_position(
BTR_CONT_MODIFY_TREE, sea_cur, level, mtr);
......@@ -824,8 +820,8 @@ rtr_page_get_father_node_ptr(
index = btr_cur_get_index(cursor);
ut_ad(srv_read_only_mode
|| mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK | MTR_MEMO_SX_LOCK));
|| mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
ut_ad(dict_index_get_page(index) != page_no);
......
......@@ -576,7 +576,7 @@ ibuf_bitmap_page_get_bits_low(
ut_ad(ut_is_2pow(zip_size));
ut_ad(bit < IBUF_BITS_PER_PAGE);
compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
ut_ad(mtr_memo_contains_page(mtr, page, latch_type));
ut_ad(mtr->memo_contains_page_flagged(page, latch_type));
bit_offset = (page_id.page_no() & (size - 1))
* IBUF_BITS_PER_PAGE + bit;
......@@ -620,7 +620,7 @@ ibuf_bitmap_page_set_bits(
static_assert(bit < IBUF_BITS_PER_PAGE, "wrong bit");
compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->is_named_space(page_id.space()));
bit_offset = (page_id.page_no() % physical_size)
......@@ -1046,9 +1046,8 @@ ibuf_rec_get_page_no_func(
const byte* field;
ulint len;
ut_ad(mtr_memo_contains_page_flagged(mtr, rec,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
ut_ad(rec_get_n_fields_old(rec) > 2);
......@@ -1085,8 +1084,8 @@ ibuf_rec_get_space_func(
const byte* field;
ulint len;
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
ut_ad(rec_get_n_fields_old(rec) > 2);
......@@ -1135,8 +1134,8 @@ ibuf_rec_get_info_func(
ulint info_len_local;
ulint counter_local;
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
fields = rec_get_n_fields_old(rec);
ut_a(fields > IBUF_REC_FIELD_USER);
......@@ -1209,8 +1208,8 @@ ibuf_rec_get_op_type_func(
{
ulint len;
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
ut_ad(rec_get_n_fields_old(rec) > 2);
......@@ -1399,8 +1398,8 @@ ibuf_build_entry_from_ibuf_rec_func(
ulint comp;
dict_index_t* index;
ut_ad(mtr_memo_contains_page_flagged(mtr, ibuf_rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(ibuf_rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
data = rec_get_nth_field_old(ibuf_rec, IBUF_REC_FIELD_MARKER, &len);
......@@ -1524,8 +1523,8 @@ ibuf_rec_get_volume_func(
ibuf_op_t op;
ulint info_len;
ut_ad(mtr_memo_contains_page_flagged(mtr, ibuf_rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(ibuf_rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
ut_ad(rec_get_n_fields_old(ibuf_rec) > 2);
......@@ -2063,8 +2062,8 @@ ibuf_get_merge_page_nos_func(
ulint limit;
ulint n_pages;
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
*n_stored = 0;
......@@ -2663,8 +2662,8 @@ ibuf_get_volume_buffered_count_func(
const byte* types;
ulint n_fields;
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(ibuf_inside(mtr));
n_fields = rec_get_n_fields_old(rec);
......@@ -3038,8 +3037,8 @@ ibuf_get_entry_counter_low_func(
ulint len;
ut_ad(ibuf_inside(mtr));
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(rec_get_n_fields_old(rec) > 2);
field = rec_get_nth_field_old(rec, IBUF_REC_FIELD_MARKER, &len);
......@@ -3113,7 +3112,7 @@ ibuf_get_entry_counter_func(
in the node pointer */
{
ut_ad(ibuf_inside(mtr));
ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_validate(page_align(rec), ibuf.index));
if (page_rec_is_supremum(rec)) {
......
......@@ -129,7 +129,7 @@ btr_leaf_page_release(
|| latch_mode == BTR_MODIFY_LEAF
|| latch_mode == BTR_NO_LATCHES);
ut_ad(!mtr_memo_contains(mtr, block, MTR_MEMO_MODIFY));
ut_ad(!mtr->memo_contains_flagged(block, MTR_MEMO_MODIFY));
mtr_memo_type_t mode;
switch (latch_mode) {
......
......@@ -129,8 +129,8 @@ btr_cur_compress_recommendation(
{
const page_t* page;
ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
page = btr_cur_get_page(cursor);
......@@ -167,8 +167,8 @@ btr_cur_can_delete_without_compress(
{
page_t* page;
ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
page = btr_cur_get_page(cursor);
......
......@@ -55,18 +55,6 @@ savepoint. */
#define mtr_memo_release(m, o, t) \
(m)->memo_release((o), (t))
#ifdef UNIV_DEBUG
/** Check if memo contains the given item.
@return TRUE if contains */
#define mtr_memo_contains(m, o, t) \
(m)->memo_contains((m)->get_memo(), (o), (t))
/** Check if memo contains the given page.
@return TRUE if contains */
#define mtr_memo_contains_page(m, p, t) \
(m)->memo_contains_page_flagged((p), (t))
#endif /* UNIV_DEBUG */
/** Print info of an mtr handle. */
#define mtr_print(m) (m)->print()
......@@ -84,12 +72,6 @@ savepoint. */
#define mtr_x_lock_index(i, m) (m)->x_lock(&(i)->lock, __FILE__, __LINE__)
#define mtr_sx_lock_index(i, m) (m)->sx_lock(&(i)->lock, __FILE__, __LINE__)
#define mtr_memo_contains_flagged(m, p, l) \
(m)->memo_contains_flagged((p), (l))
#define mtr_memo_contains_page_flagged(m, p, l) \
(m)->memo_contains_page_flagged((p), (l))
#define mtr_release_block_at_savepoint(m, s, b) \
(m)->release_block_at_savepoint((s), (b))
......@@ -331,16 +313,12 @@ struct mtr_t {
bool is_inside_ibuf() const { return m_inside_ibuf; }
#ifdef UNIV_DEBUG
/** Check if memo contains the given item.
@param memo memo stack
@param object object to search
@param type type of object
@return true if contains */
static bool memo_contains(
const mtr_buf_t* memo,
const void* object,
mtr_memo_type_t type)
MY_ATTRIBUTE((warn_unused_result));
/** Check if we are holding an rw-latch in this mini-transaction
@param lock latch to search for
@param type held latch type
@return whether (lock,type) is contained */
bool memo_contains(const rw_lock_t &lock, mtr_memo_type_t type)
MY_ATTRIBUTE((warn_unused_result));
/** Check if memo contains the given item.
@param object object to search
......
......@@ -46,7 +46,7 @@ page_update_max_trx_id(
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ut_ad(block);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(trx_id);
ut_ad(page_is_leaf(buf_block_get_frame(block)));
......@@ -83,8 +83,8 @@ page_set_ssn_id(
node_seq_t ssn_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ut_ad(mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_SX_FIX | MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_SX_FIX |
MTR_MEMO_PAGE_X_FIX));
ut_ad(!page_zip || page_zip == &block->page.zip);
constexpr uint16_t field= FIL_RTREE_SPLIT_SEQ_NUM;
byte *b= my_assume_aligned<2>(&block->frame[field]);
......
......@@ -661,34 +661,31 @@ inline lsn_t mtr_t::finish_write(ulint len)
}
#ifdef UNIV_DEBUG
/** Check if memo contains the given item.
@return true if contains */
bool
mtr_t::memo_contains(
const mtr_buf_t* memo,
const void* object,
mtr_memo_type_t type)
/** Check if we are holding an rw-latch in this mini-transaction
@param lock latch to search for
@param type held latch type
@return whether (lock,type) is contained */
bool mtr_t::memo_contains(const rw_lock_t &lock, mtr_memo_type_t type)
{
Iterate<Find> iteration(Find(object, type));
if (memo->for_each_block_in_reverse(iteration)) {
return(false);
}
Iterate<Find> iteration(Find(&lock, type));
if (m_memo.for_each_block_in_reverse(iteration))
return false;
switch (type) {
case MTR_MEMO_X_LOCK:
ut_ad(rw_lock_own((rw_lock_t*) object, RW_LOCK_X));
break;
case MTR_MEMO_SX_LOCK:
ut_ad(rw_lock_own((rw_lock_t*) object, RW_LOCK_SX));
break;
case MTR_MEMO_S_LOCK:
ut_ad(rw_lock_own((rw_lock_t*) object, RW_LOCK_S));
break;
default:
break;
}
switch (type) {
case MTR_MEMO_X_LOCK:
ut_ad(rw_lock_own(const_cast<rw_lock_t*>(&lock), RW_LOCK_X));
break;
case MTR_MEMO_SX_LOCK:
ut_ad(rw_lock_own(const_cast<rw_lock_t*>(&lock), RW_LOCK_SX));
break;
case MTR_MEMO_S_LOCK:
ut_ad(rw_lock_own(const_cast<rw_lock_t*>(&lock), RW_LOCK_S));
break;
default:
break;
}
return(true);
return true;
}
/** Debug check for flags */
......
......@@ -197,7 +197,7 @@ page_set_max_trx_id(
trx_id_t trx_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in/out: mini-transaction, or NULL */
{
ut_ad(!mtr || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!mtr || mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(!page_zip || page_zip == &block->page.zip);
static_assert((PAGE_HEADER + PAGE_MAX_TRX_ID) % 8 == 0, "alignment");
byte *max_trx_id= my_assume_aligned<8>(PAGE_MAX_TRX_ID +
......
......@@ -4423,7 +4423,7 @@ page_zip_reorganize(
buf_block_t* temp_block;
page_t* temp_page;
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(block->page.zip.data);
ut_ad(page_is_comp(page));
ut_ad(!dict_index_is_ibuf(index));
......@@ -4530,8 +4530,8 @@ page_zip_copy_recs(
page_t* page = block->frame;
page_zip_des_t* page_zip = &block->page.zip;
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr_memo_contains_page(mtr, src, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->memo_contains_page_flagged(src, MTR_MEMO_PAGE_X_FIX));
ut_ad(!dict_index_is_ibuf(index));
ut_ad(!index->table->is_temporary());
#ifdef UNIV_ZIP_DEBUG
......
......@@ -873,8 +873,8 @@ row_vers_old_has_index_entry(
mem_heap_t* v_heap = NULL;
dtuple_t* cur_vrow = NULL;
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
clust_index = dict_table_get_first_index(index->table);
comp = page_rec_is_comp(rec);
......@@ -1126,9 +1126,9 @@ row_vers_build_for_consistent_read(
byte* buf;
dberr_t err;
ut_ad(dict_index_is_clust(index));
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(index->is_primary());
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S));
ut_ad(rec_offs_validate(rec, index, *offsets));
......@@ -1239,9 +1239,9 @@ row_vers_build_for_semi_consistent_read(
byte* buf;
trx_id_t rec_trx_id = 0;
ut_ad(dict_index_is_clust(index));
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(index->is_primary());
ut_ad(mtr->memo_contains_page_flagged(rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S));
ut_ad(rec_offs_validate(rec, index, *offsets));
......
......@@ -2245,9 +2245,9 @@ trx_undo_prev_version_build(
ut_ad(!index->table->is_temporary());
ut_ad(!rw_lock_own(&purge_sys.latch, RW_LOCK_S));
ut_ad(mtr_memo_contains_page_flagged(index_mtr, index_rec,
MTR_MEMO_PAGE_S_FIX
| MTR_MEMO_PAGE_X_FIX));
ut_ad(index_mtr->memo_contains_page_flagged(index_rec,
MTR_MEMO_PAGE_S_FIX
| MTR_MEMO_PAGE_X_FIX));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_a(index->is_primary());
......
......@@ -312,7 +312,7 @@ trx_rseg_header_create(
{
buf_block_t* block;
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
ut_ad(mtr->memo_contains(space->latch, MTR_MEMO_X_LOCK));
ut_ad(!sys_header == (space == fil_system.temp_space));
/* Allocate a new file segment for the rollback segment */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment