Commit 87839258 authored by Marko Mäkelä

MDEV-21174: Replace mlog_memset() with mtr_t::memset()

Passing buf_block_t helps us avoid calling
mlog_write_initial_log_record_fast() and page_get_page_no(),
and allows us to implement more debug checks, such as asserting
that on ROW_FORMAT=COMPRESSED index pages, only the page header
may be modified by MLOG_MEMSET records.

fseg_n_reserved_pages(): Add a buf_block_t parameter.
parent caea64df
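To illustrate the call-convention change before the diff, here is a minimal, self-contained sketch using mock types only (page_id_t, buf_page_t, buf_block_t and mtr_t below merely mimic the real InnoDB structures, and the record layout is simplified): because the buffer block carries its own page identity, mtr_t::memset() can take the space id and page number from block->page.id instead of deriving them from a frame pointer via mlog_write_initial_log_record_fast() and page_get_page_no(), as the removed mlog_memset(byte*, ...) overload had to.

#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <vector>

// Mock types, loosely modelled on the InnoDB structures touched by this
// commit; none of this is InnoDB code.
struct page_id_t { uint32_t space; uint32_t page_no; };
struct buf_page_t { page_id_t id; };
struct buf_block_t { buf_page_t page; unsigned char* frame; };

struct mtr_t {
  std::vector<unsigned char> log;  // stand-in for the mini-transaction log buffer

  // New-style interface: the block carries its page identity, so the space id
  // and page number come from b->page.id rather than from the frame pointer.
  void memset(const buf_block_t* b, uint16_t ofs, uint16_t len, unsigned char val) {
    std::memset(b->frame + ofs, val, len);           // apply the change to the page
    append_memset_record(b->page.id, ofs, len, val); // and log it
  }

 private:
  // Simplified MLOG_MEMSET-like record: space, page_no, offset, length, value
  // (the real record compresses the space id and page number).
  void append_memset_record(page_id_t id, uint16_t ofs, uint16_t len,
                            unsigned char val) {
    for (uint32_t v : {id.space, id.page_no})
      for (int s = 24; s >= 0; s -= 8) log.push_back(uint8_t(v >> s));
    log.push_back(uint8_t(ofs >> 8)); log.push_back(uint8_t(ofs));
    log.push_back(uint8_t(len >> 8)); log.push_back(uint8_t(len));
    log.push_back(val);
  }
};

int main() {
  unsigned char frame[16384] = {};
  buf_block_t block{{{0, 3}}, frame};
  mtr_t mtr;
  // Before this commit: mlog_memset(&block, FIL_PAGE_PREV, 8, 0xff, &mtr);
  // After this commit:
  mtr.memset(&block, /* FIL_PAGE_PREV */ 4, 8, 0xff);
}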
......@@ -607,42 +607,35 @@ btr_get_size(
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
{
fseg_header_t* seg_header;
page_t* root;
ulint n=0;
ulint dummy;
ut_ad(srv_read_only_mode
|| mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_S_LOCK));
ut_ad(flag == BTR_N_LEAF_PAGES || flag == BTR_TOTAL_SIZE);
if (index->page == FIL_NULL
|| dict_index_is_online_ddl(index)
|| !index->is_committed()) {
|| !index->is_committed()
|| !index->table->space) {
return(ULINT_UNDEFINED);
}
root = btr_root_get(index, mtr);
if (root) {
if (flag == BTR_N_LEAF_PAGES) {
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
fseg_n_reserved_pages(seg_header, &n, mtr);
} else if (flag == BTR_TOTAL_SIZE) {
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_TOP;
n = fseg_n_reserved_pages(seg_header, &dummy, mtr);
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
n += fseg_n_reserved_pages(seg_header, &dummy, mtr);
} else {
ut_error;
buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, mtr);
if (!root) {
return ULINT_UNDEFINED;
}
mtr_x_lock_space(index->table->space, mtr);
if (flag == BTR_N_LEAF_PAGES) {
fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->frame, &n, mtr);
} else {
n = ULINT_UNDEFINED;
ulint dummy;
n = fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_TOP
+ root->frame, &dummy, mtr);
n += fseg_n_reserved_pages(*root,
PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->frame, &dummy, mtr);
}
return(n);
......@@ -662,9 +655,6 @@ btr_get_size_and_reserved(
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
{
fseg_header_t* seg_header;
page_t* root;
ulint n=ULINT_UNDEFINED;
ulint dummy;
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
......@@ -674,26 +664,26 @@ btr_get_size_and_reserved(
if (index->page == FIL_NULL
|| dict_index_is_online_ddl(index)
|| !index->is_committed()) {
|| !index->is_committed()
|| !index->table->space) {
return(ULINT_UNDEFINED);
}
root = btr_root_get(index, mtr);
buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, mtr);
*used = 0;
if (!root) {
return ULINT_UNDEFINED;
}
if (root) {
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_LEAF;
n = fseg_n_reserved_pages(seg_header, used, mtr);
mtr_x_lock_space(index->table->space, mtr);
ulint n = fseg_n_reserved_pages(*root, PAGE_HEADER + PAGE_BTR_SEG_LEAF
+ root->frame, used, mtr);
if (flag == BTR_TOTAL_SIZE) {
seg_header = root + PAGE_HEADER + PAGE_BTR_SEG_TOP;
n += fseg_n_reserved_pages(seg_header, &dummy, mtr);
n += fseg_n_reserved_pages(*root,
PAGE_HEADER + PAGE_BTR_SEG_TOP
+ root->frame, &dummy, mtr);
*used += dummy;
}
}
return(n);
......@@ -1148,7 +1138,7 @@ btr_create(
/* Set the next node and previous node fields */
compile_time_assert(FIL_PAGE_NEXT == FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(block, FIL_PAGE_PREV, 8, 0xff, mtr);
mtr->memset(block, FIL_PAGE_PREV, 8, 0xff);
/* We reset the free bits for the page in a separate
mini-transaction to allow creation of several trees in the
......@@ -1847,8 +1837,8 @@ void btr_set_instant(buf_block_t* root, const dict_index_t& index, mtr_t* mtr)
}
if (index.table->instant) {
mlog_memset(root, infimum - root->frame, 8, 0, mtr);
mlog_memset(root, supremum - root->frame, 7, 0, mtr);
mtr->memset(root, infimum - root->frame, 8, 0);
mtr->memset(root, supremum - root->frame, 7, 0);
mtr->write<1,mtr_t::OPT>(*root, &supremum[7],
index.n_core_null_bytes);
}
......@@ -1939,7 +1929,7 @@ btr_root_raise_and_insert(
} else {
compile_time_assert(FIL_PAGE_NEXT == FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(new_block, FIL_PAGE_PREV, 8, 0xff, mtr);
mtr->memset(new_block, FIL_PAGE_PREV, 8, 0xff);
if (UNIV_LIKELY_NULL(new_page_zip)) {
static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
memset_aligned<8>(new_page_zip->data + FIL_PAGE_PREV,
......@@ -1988,8 +1978,7 @@ btr_root_raise_and_insert(
memset_aligned<8>(p, 0, 8);
page_zip_write_header(root_page_zip, p, 8, mtr);
} else if (mach_read_from_8(p)) {
mlog_memset(root, PAGE_HEADER + PAGE_MAX_TRX_ID, 8, 0,
mtr);
mtr->memset(root, PAGE_HEADER + PAGE_MAX_TRX_ID, 8, 0);
}
} else {
/* PAGE_ROOT_AUTO_INC is only present in the clustered index
......@@ -2002,8 +1991,8 @@ btr_root_raise_and_insert(
memset_aligned<8>(p, 0, 8);
page_zip_write_header(new_page_zip, p, 8, mtr);
} else if (mach_read_from_8(p)) {
mlog_memset(new_block, PAGE_HEADER + PAGE_MAX_TRX_ID,
8, 0, mtr);
mtr->memset(new_block, PAGE_HEADER + PAGE_MAX_TRX_ID,
8, 0);
}
}
......
......@@ -113,7 +113,7 @@ PageBulk::init()
compile_time_assert(FIL_PAGE_NEXT
== FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(new_block, FIL_PAGE_PREV, 8, 0xff, &m_mtr);
m_mtr.memset(new_block, FIL_PAGE_PREV, 8, 0xff);
m_mtr.write<2,mtr_t::OPT>(*new_block,
PAGE_HEADER + PAGE_LEVEL
+ new_page, m_level);
......
......@@ -7744,8 +7744,7 @@ btr_store_big_rec_extern_fields(
compile_time_assert(FIL_PAGE_NEXT
== FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(block, FIL_PAGE_PREV, 8, 0xff,
&mtr);
mtr.memset(block, FIL_PAGE_PREV, 8, 0xff);
/* Write a back pointer to the record
into the otherwise unused area. This
information could be useful in
......@@ -7779,11 +7778,10 @@ btr_store_big_rec_extern_fields(
&mtr);
/* Zero out the unused part of the page. */
if (c_stream.avail_out) {
mlog_memset(block,
mtr.memset(block,
page_zip_get_size(page_zip)
- c_stream.avail_out,
c_stream.avail_out,
0, &mtr);
c_stream.avail_out, 0);
}
/* Copy the page to compressed storage,
because it will be flushed to disk
......@@ -7866,8 +7864,8 @@ btr_store_big_rec_extern_fields(
+ FIL_PAGE_DATA + block->frame,
store_len);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(block, BTR_BLOB_HDR_NEXT_PAGE_NO
+ FIL_PAGE_DATA, 4, 0xff, &mtr);
mtr.memset(block, BTR_BLOB_HDR_NEXT_PAGE_NO
+ FIL_PAGE_DATA, 4, 0xff);
extern_len -= store_len;
......
......@@ -899,12 +899,12 @@ dict_create_index_tree_in_mem(
}
/** Drop the index tree associated with a row in SYS_INDEXES table.
@param[in,out] rec SYS_INDEXES record
@param[in,out] pcur persistent cursor on rec
@param[in,out] trx dictionary transaction
@param[in,out] mtr mini-transaction */
void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, trx_t* trx, mtr_t* mtr)
void dict_drop_index_tree(btr_pcur_t* pcur, trx_t* trx, mtr_t* mtr)
{
rec_t* rec = btr_pcur_get_rec(pcur);
byte* ptr;
ulint len;
......@@ -925,7 +925,7 @@ void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, trx_t* trx, mtr_t* mtr)
}
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(ptr, 4, 0xff, mtr);
mtr->memset(btr_pcur_get_block(pcur), page_offset(ptr), 4, 0xff);
ptr = rec_get_nth_field_old(
rec, DICT_FLD__SYS_INDEXES__SPACE, &len);
......
......@@ -257,7 +257,8 @@ Inits an extent descriptor to the free and clean state. */
inline void xdes_init(const buf_block_t &block, xdes_t *descr, mtr_t *mtr)
{
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_SX_FIX));
mlog_memset(descr + XDES_BITMAP, XDES_SIZE - XDES_BITMAP, 0xff, mtr);
mtr->memset(&block, uint16_t(descr - block.frame) + XDES_BITMAP,
XDES_SIZE - XDES_BITMAP, 0xff);
xdes_set_state(block, descr, XDES_FREE, mtr);
}
......@@ -1574,7 +1575,7 @@ static void fsp_free_seg_inode(
static
fseg_inode_t*
fseg_inode_try_get(
fseg_header_t* header,
const fseg_header_t* header,
ulint space,
ulint zip_size,
mtr_t* mtr,
......@@ -1611,7 +1612,7 @@ fseg_inode_try_get(
static
fseg_inode_t*
fseg_inode_get(
fseg_header_t* header,
const fseg_header_t* header,
ulint space,
ulint zip_size,
mtr_t* mtr,
......@@ -1820,8 +1821,8 @@ fseg_create(
mtr->write<4>(*iblock, inode + FSEG_MAGIC_N, FSEG_MAGIC_N_VALUE);
compile_time_assert(FSEG_FRAG_SLOT_SIZE == 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(iblock, uint16_t(inode - iblock->frame) + FSEG_FRAG_ARR,
FSEG_FRAG_SLOT_SIZE * FSEG_FRAG_ARR_N_SLOTS, 0xff, mtr);
mtr->memset(iblock, uint16_t(inode - iblock->frame) + FSEG_FRAG_ARR,
FSEG_FRAG_SLOT_SIZE * FSEG_FRAG_ARR_N_SLOTS, 0xff);
if (page == 0) {
block = fseg_alloc_free_page_low(space,
......@@ -1895,30 +1896,22 @@ fseg_n_reserved_pages_low(
return(ret);
}
/**********************************************************************//**
Calculates the number of pages reserved by a segment, and how many pages are
currently used.
/** Calculate the number of pages reserved by a segment,
and how many pages are currently used.
@param[in] block buffer block containing the file segment header
@param[in] header file segment header
@param[out] used number of pages that are used (not more than reserved)
@param[in,out] mtr mini-transaction
@return number of reserved pages */
ulint
fseg_n_reserved_pages(
/*==================*/
fseg_header_t* header, /*!< in: segment header */
ulint* used, /*!< out: number of pages used (<= reserved) */
mtr_t* mtr) /*!< in/out: mini-transaction */
ulint fseg_n_reserved_pages(const buf_block_t &block,
const fseg_header_t *header, ulint *used,
mtr_t *mtr)
{
ulint ret;
fseg_inode_t* inode;
ulint space_id;
fil_space_t* space;
space_id = page_get_space_id(page_align(header));
space = mtr_x_lock_space(space_id, mtr);
inode = fseg_inode_get(header, space_id, space->zip_size(), mtr);
ret = fseg_n_reserved_pages_low(inode, used, mtr);
return(ret);
ut_ad(page_align(header) == block.frame);
return fseg_n_reserved_pages_low(fseg_inode_get(header,
block.page.id.space(),
block.zip_size(), mtr),
used, mtr);
}
/** Tries to fill the free list of a segment with consecutive free extents.
......@@ -2588,6 +2581,7 @@ fseg_free_page_low(
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_ad(iblock->frame == page_align(seg_inode));
ut_d(space->modify_check(*mtr));
#ifdef BTR_CUR_HASH_ADAPT
/* Drop search system page hash index if the page is found in
......@@ -2621,8 +2615,9 @@ fseg_free_page_low(
}
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(seg_inode + FSEG_FRAG_ARR
+ i * FSEG_FRAG_SLOT_SIZE, 4, 0xff, mtr);
mtr->memset(iblock, uint16_t(seg_inode - iblock->frame)
+ FSEG_FRAG_ARR
+ i * FSEG_FRAG_SLOT_SIZE, 4, 0xff);
break;
}
......
......@@ -411,7 +411,6 @@ ibuf_init_at_db_start(void)
{
page_t* root;
ulint n_used;
page_t* header_page;
ut_ad(!ibuf.index);
mtr_t mtr;
......@@ -419,7 +418,9 @@ ibuf_init_at_db_start(void)
compile_time_assert(IBUF_SPACE_ID == TRX_SYS_SPACE);
compile_time_assert(IBUF_SPACE_ID == 0);
mtr_x_lock_space(fil_system.sys_space, &mtr);
header_page = ibuf_header_page_get(&mtr);
buf_block_t* header_page = buf_page_get(
page_id_t(IBUF_SPACE_ID, FSP_IBUF_HEADER_PAGE_NO),
0, RW_X_LATCH, &mtr);
if (!header_page) {
mtr.commit();
......@@ -443,8 +444,9 @@ ibuf_init_at_db_start(void)
mutex_enter(&ibuf_mutex);
fseg_n_reserved_pages(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
&n_used, &mtr);
fseg_n_reserved_pages(*header_page,
IBUF_HEADER + IBUF_TREE_SEG_HEADER
+ header_page->frame, &n_used, &mtr);
ut_ad(n_used >= 2);
......
......@@ -97,11 +97,10 @@ dict_create_index_tree(
const trx_t* trx); /*!< in: InnoDB transaction handle */
/** Drop the index tree associated with a row in SYS_INDEXES table.
@param[in,out] rec SYS_INDEXES record
@param[in,out] pcur persistent cursor on rec
@param[in,out] trx dictionary transaction
@param[in,out] mtr mini-transaction */
void dict_drop_index_tree(rec_t* rec, btr_pcur_t* pcur, trx_t* trx, mtr_t* mtr)
void dict_drop_index_tree(btr_pcur_t* pcur, trx_t* trx, mtr_t* mtr)
MY_ATTRIBUTE((nonnull));
/***************************************************************//**
......
......@@ -391,16 +391,17 @@ fseg_create(
no need to do the check for this individual
operation */
/**********************************************************************//**
Calculates the number of pages reserved by a segment, and how many pages are
currently used.
/** Calculate the number of pages reserved by a segment,
and how many pages are currently used.
@param[in] block buffer block containing the file segment header
@param[in] header file segment header
@param[out] used number of pages that are used (not more than reserved)
@param[in,out] mtr mini-transaction
@return number of reserved pages */
ulint
fseg_n_reserved_pages(
/*==================*/
fseg_header_t* header, /*!< in: segment header */
ulint* used, /*!< out: number of pages used (<= reserved) */
mtr_t* mtr); /*!< in/out: mini-transaction */
ulint fseg_n_reserved_pages(const buf_block_t &block,
const fseg_header_t *header, ulint *used,
mtr_t *mtr)
MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize
......
......@@ -74,8 +74,8 @@ inline void flst_init(const buf_block_t* block, uint16_t ofs, mtr_t* mtr)
ut_ad(!mach_read_from_2(FLST_FIRST + FIL_ADDR_BYTE + ofs + block->frame));
ut_ad(!mach_read_from_2(FLST_LAST + FIL_ADDR_BYTE + ofs + block->frame));
compile_time_assert(FIL_NULL == 0xffU * 0x1010101U);
mlog_memset(block, FLST_FIRST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
mlog_memset(block, FLST_LAST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
mtr->memset(block, FLST_FIRST + FIL_ADDR_PAGE + ofs, 4, 0xff);
mtr->memset(block, FLST_LAST + FIL_ADDR_PAGE + ofs, 4, 0xff);
}
/** Write a null file address.
......@@ -85,7 +85,7 @@ inline void flst_init(const buf_block_t* block, uint16_t ofs, mtr_t* mtr)
inline void flst_zero_addr(const buf_block_t& b, fil_faddr_t *addr, mtr_t *mtr)
{
if (mach_read_from_4(addr + FIL_ADDR_PAGE) != FIL_NULL)
mlog_memset(&b, ulint(addr - b.frame) + FIL_ADDR_PAGE, 4, 0xff, mtr);
mtr->memset(&b, ulint(addr - b.frame) + FIL_ADDR_PAGE, 4, 0xff);
mtr->write<2,mtr_t::OPT>(b, addr + FIL_ADDR_BYTE, 0U);
}
......
......@@ -53,22 +53,6 @@ mlog_log_string(
ulint len, /*!< in: string length */
mtr_t* mtr); /*!< in: mini-transaction handle */
/** Initialize a string of bytes.
@param[in,out] b buffer page
@param[in] ofs byte offset from block->frame
@param[in] len length of the data to write
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void
mlog_memset(const buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr);
/** Initialize a string of bytes.
@param[in,out] byte byte address
@param[in] len length of the data to write
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void mlog_memset(byte* b, ulint len, byte val, mtr_t* mtr);
/********************************************************//**
Catenates 1 - 4 bytes to the mtr log. The value is not compressed. */
UNIV_INLINE
......@@ -217,7 +201,7 @@ mlog_parse_initial_log_record(
ulint* space, /*!< out: space id */
ulint* page_no);/*!< out: page number */
/********************************************************//**
Parses a log record written by mtr_t::write(), mlog_memset().
Parses a log record written by mtr_t::write(), mtr_t::memset().
@return parsed record end, NULL if not a complete record */
const byte*
mlog_parse_nbytes(
......
......@@ -447,6 +447,13 @@ struct mtr_t {
inline void write(const buf_block_t &block, byte *ptr, V val)
MY_ATTRIBUTE((nonnull));
/** Initialize a string of bytes.
@param[in,out] b buffer page
@param[in] ofs byte offset from b->frame
@param[in] len length of the data to write
@param[in] val the data byte to write */
void memset(const buf_block_t* b, ulint ofs, ulint len, byte val);
private:
/**
Write a log record for writing 1, 2, or 4 bytes.
......
......@@ -50,7 +50,38 @@ mlog_catenate_string(
}
/********************************************************//**
Parses an initial log record written by mlog_write_initial_log_record_low().
Writes the initial part of a log record consisting of one-byte item
type and four-byte space and page numbers. Also pushes info
to the mtr memo that a buffer page has been modified. */
void
mlog_write_initial_log_record(
/*==========================*/
const byte* ptr, /*!< in: pointer to (inside) a buffer
frame holding the file page where
modification is made */
mlog_id_t type, /*!< in: log item type: MLOG_1BYTE, ... */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
byte* log_ptr;
ut_ad(type <= MLOG_BIGGEST_TYPE);
ut_ad(type > MLOG_8BYTES);
log_ptr = mlog_open(mtr, 11);
/* If no logging is requested, we may return now */
if (log_ptr == NULL) {
return;
}
log_ptr = mlog_write_initial_log_record_fast(ptr, type, log_ptr, mtr);
mlog_close(mtr, log_ptr);
}
/********************************************************//**
Parses an initial log record written by mlog_write_initial_log_record.
@return parsed record end, NULL if not a complete record */
const byte*
mlog_parse_initial_log_record(
......@@ -90,7 +121,7 @@ mlog_parse_initial_log_record(
}
/********************************************************//**
Parses a log record written by mtr_t::write(), mlog_memset().
Parses a log record written by mtr_t::write(), mtr_t::memset().
@return parsed record end, NULL if not a complete record or a corrupt record */
const byte*
mlog_parse_nbytes(
......@@ -377,66 +408,31 @@ mlog_parse_string(
@param[in,out] b buffer page
@param[in] ofs byte offset from block->frame
@param[in] len length of the data to write
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void
mlog_memset(const buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr)
@param[in] val the data byte to write */
void mtr_t::memset(const buf_block_t* b, ulint ofs, ulint len, byte val)
{
ut_ad(len);
ut_ad(ofs <= ulint(srv_page_size));
ut_ad(ofs + len <= ulint(srv_page_size));
memset(ofs + b->frame, val, len);
mtr->set_modified();
switch (mtr->get_log_mode()) {
case MTR_LOG_NONE:
case MTR_LOG_NO_REDO:
ut_ad(ofs + len < PAGE_DATA || !b->page.zip.data ||
mach_read_from_2(b->frame + FIL_PAGE_TYPE) <= FIL_PAGE_TYPE_ZBLOB2);
::memset(ofs + b->frame, val, len);
set_modified();
if (get_log_mode() != MTR_LOG_ALL)
{
ut_ad(get_log_mode() == MTR_LOG_NONE ||
get_log_mode() == MTR_LOG_NO_REDO);
return;
case MTR_LOG_SHORT_INSERTS:
ut_ad(0);
/* fall through */
case MTR_LOG_ALL:
break;
}
byte* l = mtr->get_log()->open(11 + 2 + 2 + 1);
l = mlog_write_initial_log_record_low(
MLOG_MEMSET, b->page.id.space(), b->page.id.page_no(), l, mtr);
byte *l= get_log()->open(11 + 2 + 2 + 1);
l= mlog_write_initial_log_record_low(MLOG_MEMSET, b->page.id.space(),
b->page.id.page_no(), l, this);
mach_write_to_2(l, ofs);
mach_write_to_2(l + 2, len);
l[4] = val;
mlog_close(mtr, l + 5);
}
/** Initialize a string of bytes.
@param[in,out] byte byte address
@param[in] len length of the data to write
@param[in] val the data byte to write
@param[in,out] mtr mini-transaction */
void mlog_memset(byte* b, ulint len, byte val, mtr_t* mtr)
{
ut_ad(len);
ut_ad(page_offset(b) + len <= ulint(srv_page_size));
memset(b, val, len);
mtr->set_modified();
switch (mtr->get_log_mode()) {
case MTR_LOG_NONE:
case MTR_LOG_NO_REDO:
return;
case MTR_LOG_SHORT_INSERTS:
ut_ad(0);
/* fall through */
case MTR_LOG_ALL:
break;
}
byte* l = mtr->get_log()->open(11 + 2 + 2 + 1);
l = mlog_write_initial_log_record_fast(b, MLOG_MEMSET, l, mtr);
mach_write_to_2(l, page_offset(b));
mach_write_to_2(l + 2, len);
l[4] = val;
mlog_close(mtr, l + 5);
l[4]= val;
mlog_close(this, l + 5);
}
/********************************************************//**
......
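As the mtr_t::memset() implementation above shows (hence the open(11 + 2 + 2 + 1) reservation), the MLOG_MEMSET payload that follows the initial type/space/page record is just a 2-byte page offset, a 2-byte length and a 1-byte fill value, written big-endian by mach_write_to_2(). A hypothetical standalone routine for applying such a 5-byte body during recovery could look like the sketch below; it illustrates the record format only and is not the actual mlog_parse_nbytes() code:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Apply the 5-byte body of an MLOG_MEMSET record (2-byte offset, 2-byte
// length, 1-byte value; big-endian) to a page frame. Returns a pointer past
// the consumed bytes, or nullptr if the record is incomplete or inconsistent
// with the page size.
static const unsigned char* apply_memset_body(const unsigned char* ptr,
                                              const unsigned char* end,
                                              unsigned char* page,
                                              size_t page_size)
{
  if (end - ptr < 5) return nullptr;                          // incomplete record
  const uint16_t ofs = uint16_t(ptr[0] << 8 | ptr[1]);
  const uint16_t len = uint16_t(ptr[2] << 8 | ptr[3]);
  const unsigned char val = ptr[4];
  if (!len || size_t(ofs) + len > page_size) return nullptr;  // corrupt record
  if (page) std::memset(page + ofs, val, len);                // page == nullptr: parse only
  return ptr + 5;
}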
......@@ -130,8 +130,7 @@ row_undo_ins_remove_clust_rec(
== RW_X_LATCH);
ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
dict_drop_index_tree(rec, &node->pcur, node->trx,
&mtr);
dict_drop_index_tree(&node->pcur, node->trx, &mtr);
mtr.commit();
mtr.start();
......
......@@ -3124,8 +3124,7 @@ row_upd_clust_step(
ut_ad(!dict_index_is_online_ddl(index));
dict_drop_index_tree(
btr_pcur_get_rec(pcur), pcur, trx, &mtr);
dict_drop_index_tree(pcur, trx, &mtr);
mtr.commit();
......
......@@ -230,9 +230,9 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
/* The undo log segment will not be reused */
ut_a(undo->id < TRX_RSEG_N_SLOTS);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ undo->id * TRX_RSEG_SLOT_SIZE
+ rseg_header->frame, 4, 0xff, mtr);
mtr->memset(rseg_header,
TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff);
MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
......
......@@ -76,9 +76,9 @@ trx_rseg_write_wsrep_checkpoint(
reinterpret_cast<const byte*>(xid->data),
xid_length, mtr);
if (UNIV_LIKELY(xid_length < XIDDATASIZE)) {
mlog_memset(TRX_RSEG + TRX_RSEG_WSREP_XID_DATA
+ rseg_header->frame + xid_length,
XIDDATASIZE - xid_length, 0, mtr);
mtr->memset(rseg_header,
TRX_RSEG + TRX_RSEG_WSREP_XID_DATA + xid_length,
XIDDATASIZE - xid_length, 0);
}
}
......@@ -115,9 +115,9 @@ trx_rseg_update_wsrep_checkpoint(
@param[in,out] mtr mini-transaction */
static void trx_rseg_clear_wsrep_checkpoint(buf_block_t *block, mtr_t *mtr)
{
mlog_memset(block, TRX_RSEG + TRX_RSEG_WSREP_XID_INFO,
mtr->memset(block, TRX_RSEG + TRX_RSEG_WSREP_XID_INFO,
TRX_RSEG_WSREP_XID_DATA + XIDDATASIZE - TRX_RSEG_WSREP_XID_INFO,
0, mtr);
0);
}
static void
......@@ -283,13 +283,13 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid)
@param[in,out] mtr mini-transaction */
void trx_rseg_format_upgrade(buf_block_t *rseg_header, mtr_t *mtr)
{
mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_FORMAT, 4, 0, mtr);
mtr->memset(rseg_header, TRX_RSEG + TRX_RSEG_FORMAT, 4, 0);
/* Clear also possible garbage at the end of the page. Old
InnoDB versions did not initialize unused parts of pages. */
mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8,
mtr->memset(rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8,
srv_page_size
- (FIL_PAGE_DATA_END + TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8),
0, mtr);
0);
}
/** Create a rollback segment header.
......@@ -330,8 +330,8 @@ trx_rseg_header_create(
flst_init(block, TRX_RSEG_HISTORY + TRX_RSEG, mtr);
/* Reset the undo log slots */
mlog_memset(block, TRX_RSEG_UNDO_SLOTS + TRX_RSEG,
TRX_RSEG_N_SLOTS * 4, 0xff, mtr);
mtr->memset(block, TRX_RSEG_UNDO_SLOTS + TRX_RSEG,
TRX_RSEG_N_SLOTS * 4, 0xff);
if (sys_header) {
/* Add the rollback segment info to the free slot in
......@@ -643,9 +643,9 @@ trx_rseg_array_init()
}
/* Finally, clear WSREP XID in TRX_SYS page. */
mlog_memset(trx_sysf_get(&mtr),
mtr.memset(trx_sysf_get(&mtr),
TRX_SYS + TRX_SYS_WSREP_XID_INFO,
TRX_SYS_WSREP_XID_LEN, 0, &mtr);
TRX_SYS_WSREP_XID_LEN, 0);
mtr.commit();
}
#endif
......
......@@ -179,15 +179,15 @@ trx_sysf_create(
compile_time_assert(TRX_SYS + TRX_SYS_RSEGS
+ 256 * TRX_SYS_RSEG_SLOT_SIZE
<= UNIV_PAGE_SIZE_MIN - FIL_PAGE_DATA_END);
mlog_memset(block, TRX_SYS + TRX_SYS_RSEGS,
256 * TRX_SYS_RSEG_SLOT_SIZE, 0xff, mtr);
mtr->memset(block, TRX_SYS + TRX_SYS_RSEGS,
256 * TRX_SYS_RSEG_SLOT_SIZE, 0xff);
/* Initialize all of the page. This part used to be uninitialized. */
mlog_memset(block, TRX_SYS + TRX_SYS_RSEGS
mtr->memset(block, TRX_SYS + TRX_SYS_RSEGS
+ 256 * TRX_SYS_RSEG_SLOT_SIZE,
srv_page_size
- (FIL_PAGE_DATA_END + TRX_SYS + TRX_SYS_RSEGS
+ 256 * TRX_SYS_RSEG_SLOT_SIZE),
0, mtr);
0);
/* Create the first rollback segment in the SYSTEM tablespace */
slot_no = trx_sys_rseg_find_free(block);
......
......@@ -601,8 +601,8 @@ static void trx_undo_write_xid(buf_block_t *block, uint16_t offset,
reinterpret_cast<const byte*>(xid.data),
xid_length, mtr);
if (UNIV_LIKELY(xid_length < XIDDATASIZE))
mlog_memset(log_hdr + TRX_UNDO_XA_XID + xid_length,
XIDDATASIZE - xid_length, 0, mtr);
mtr->memset(block, offset + TRX_UNDO_XA_XID + xid_length,
XIDDATASIZE - xid_length, 0);
}
/********************************************************************//**
......@@ -946,9 +946,8 @@ static void trx_undo_seg_free(const trx_undo_t* undo, bool noredo)
buf_block_t* rseg_header = trx_rsegf_get(
rseg->space, rseg->page_no, &mtr);
compile_time_assert(FIL_NULL == 0xffffffff);
mlog_memset(rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff,
&mtr);
mtr.memset(rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS
+ undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff);
MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
}
......