Commit b212f1da authored by Marko Mäkelä

MDEV-22107 Restore accidentally orphaned MTR_MEMO_MODIFY

In MDEV-12353, the calls to mtr_t::memo_modify_page()
were accidentally removed along with
mlog_open_and_write_index() and its callers.

Let us resurrect the function to enable better debug checks.

mtr_t::flag_modified(): Renamed from mtr_t::set_modified()
and made private.

mtr_t::set_modified(): Take const buf_block_t& as a parameter.

In several mtr_t member functions, replace const buf_page_t&
parameters with const buf_block_t&, so that we can pass the
parameter to set_modified().

mtr_t::modify(): Add a MTR_MEMO_MODIFY entry for a block that
is guaranteed to be modified in the mini-transaction.
parent 587f3e0d
......@@ -1828,7 +1828,7 @@ inline void mtr_t::log_file_op(mfile_type_t type, ulint space_id,
ut_ad(strchr(path, OS_PATH_SEPARATOR) != NULL);
ut_ad(!strcmp(&path[strlen(path) - strlen(DOT_IBD)], DOT_IBD));
set_modified();
flag_modified();
if (m_log_mode != MTR_LOG_ALL)
return;
m_last= nullptr;
......
......@@ -210,7 +210,7 @@ class PageBulk
return(m_err);
}
void set_modified() { m_mtr.set_modified(); }
void set_modified() { m_mtr.set_modified(*m_block); }
/* Memory heap for internal allocation */
mem_heap_t* m_heap;
......
......@@ -207,7 +207,7 @@ inline bool mtr_t::write(const buf_block_t &block, void *ptr, V val)
p--;
}
::memcpy(ptr, buf, l);
memcpy_low(block.page, static_cast<uint16_t>
memcpy_low(block, static_cast<uint16_t>
(ut_align_offset(p, srv_page_size)), p, end - p);
return true;
}
......@@ -220,7 +220,7 @@ inline bool mtr_t::write(const buf_block_t &block, void *ptr, V val)
inline void mtr_t::memset(const buf_block_t &b, ulint ofs, ulint len, byte val)
{
ut_ad(len);
set_modified();
set_modified(b);
if (m_log_mode != MTR_LOG_ALL)
return;
......@@ -257,7 +257,7 @@ inline void mtr_t::memset(const buf_block_t &b, ulint ofs, size_t len,
{
ut_ad(size);
ut_ad(len > size); /* use mtr_t::memcpy() for shorter writes */
set_modified();
set_modified(b);
if (m_log_mode != MTR_LOG_ALL)
return;
......@@ -303,30 +303,31 @@ inline void mtr_t::memcpy(const buf_block_t &b, ulint offset, ulint len)
ut_ad(len);
ut_ad(offset <= ulint(srv_page_size));
ut_ad(offset + len <= ulint(srv_page_size));
memcpy_low(b.page, uint16_t(offset), &b.frame[offset], len);
memcpy_low(b, uint16_t(offset), &b.frame[offset], len);
}
/** Log a write of a byte string to a page.
@param id page identifier
@param block page
@param offset byte offset within page
@param data data to be written
@param len length of the data, in bytes */
inline void mtr_t::memcpy_low(const buf_page_t &bpage, uint16_t offset,
inline void mtr_t::memcpy_low(const buf_block_t &block, uint16_t offset,
const void *data, size_t len)
{
ut_ad(len);
set_modified();
set_modified(block);
if (m_log_mode != MTR_LOG_ALL)
return;
if (len < mtr_buf_t::MAX_DATA_SIZE - (1 + 3 + 3 + 5 + 5))
{
byte *end= log_write<WRITE>(bpage.id, &bpage, len, true, offset);
byte *end= log_write<WRITE>(block.page.id, &block.page, len, true, offset);
::memcpy(end, data, len);
m_log.close(end + len);
}
else
{
m_log.close(log_write<WRITE>(bpage.id, &bpage, len, false, offset));
m_log.close(log_write<WRITE>(block.page.id, &block.page, len, false,
offset));
m_log.push(static_cast<const byte*>(data), static_cast<uint32_t>(len));
}
m_last_offset= static_cast<uint16_t>(offset + len);
......@@ -348,7 +349,7 @@ inline void mtr_t::memmove(const buf_block_t &b, ulint d, ulint s, ulint len)
ut_ad(d <= ulint(srv_page_size));
ut_ad(d + len <= ulint(srv_page_size));
set_modified();
set_modified(b);
if (m_log_mode != MTR_LOG_ALL)
return;
static_assert(MIN_4BYTE > UNIV_PAGE_SIZE_MAX, "consistency");
......@@ -534,7 +535,7 @@ inline void mtr_t::free(const page_id_t id)
@param type extended record subtype; @see mrec_ext_t */
inline void mtr_t::log_write_extended(const buf_block_t &block, byte type)
{
set_modified();
set_modified(block);
if (m_log_mode != MTR_LOG_ALL)
return;
byte *l= log_write<EXTENDED>(block.page.id, &block.page, 1, true);
......@@ -561,7 +562,7 @@ inline void mtr_t::page_delete(const buf_block_t &block, ulint prev_rec)
{
ut_ad(!block.zip_size());
ut_ad(prev_rec < block.physical_size());
set_modified();
set_modified(block);
if (m_log_mode != MTR_LOG_ALL)
return;
size_t len= (prev_rec < MIN_2BYTE ? 2 : prev_rec < MIN_3BYTE ? 3 : 4);
......@@ -585,7 +586,7 @@ inline void mtr_t::page_delete(const buf_block_t &block, ulint prev_rec,
size_t hdr_size, size_t data_size)
{
ut_ad(!block.zip_size());
set_modified();
set_modified(block);
ut_ad(hdr_size < MIN_3BYTE);
ut_ad(prev_rec < block.physical_size());
ut_ad(data_size < block.physical_size());
......@@ -620,7 +621,7 @@ inline void mtr_t::undo_append(const buf_block_t &block,
const void *data, size_t len)
{
ut_ad(len > 2);
set_modified();
set_modified(block);
if (m_log_mode != MTR_LOG_ALL)
return;
const bool small= len + 1 < mtr_buf_t::MAX_DATA_SIZE - (1 + 3 + 3 + 5 + 5);
......
......@@ -295,8 +295,22 @@ struct mtr_t {
@param[in] type object type: MTR_MEMO_PAGE_X_FIX, ... */
void release_page(const void* ptr, mtr_memo_type_t type);
/** Note that the mini-transaction has modified data. */
void set_modified() { m_modifications = true; }
private:
/** Note that the mini-transaction will modify data. */
void flag_modified() { m_modifications = true; }
#ifdef UNIV_DEBUG
/** Mark the given latched page as modified.
@param block page that will be modified */
void modify(const buf_block_t& block);
public:
/** Note that the mini-transaction will modify a block. */
void set_modified(const buf_block_t &block)
{ flag_modified(); if (m_log_mode == MTR_LOG_ALL) modify(block); }
#else /* UNIV_DEBUG */
public:
/** Note that the mini-transaction will modify a block. */
void set_modified(const buf_block_t &) { flag_modified(); }
#endif /* UNIV_DEBUG */
/** Set the state to not-modified. This will not log the changes.
This is only used during redo log apply, to avoid logging the changes. */
......@@ -345,10 +359,6 @@ struct mtr_t {
const byte* ptr,
ulint flags) const;
/** Mark the given latched page as modified.
@param[in] ptr pointer to within buffer frame */
void memo_modify_page(const byte* ptr);
/** Print info of an mtr handle. */
void print() const;
......@@ -427,16 +437,16 @@ struct mtr_t {
@param[in] b ROW_FORMAT=COMPRESSED index page
@param[in] offset byte offset from b.zip.data
@param[in] len length of the data to write */
inline void zmemcpy(const buf_page_t &b, ulint offset, ulint len);
inline void zmemcpy(const buf_block_t &b, ulint offset, ulint len);
/** Write a byte string to a ROW_FORMAT=COMPRESSED page.
@param[in,out] b ROW_FORMAT=COMPRESSED index page
@param[in] b ROW_FORMAT=COMPRESSED index page
@param[in] dest destination within b.zip.data
@param[in] str the data to write
@param[in] len length of the data to write
@tparam w write request type */
template<write_type w= NORMAL>
inline void zmemcpy(const buf_page_t &b, void *dest, const void *str,
inline void zmemcpy(const buf_block_t &b, void *dest, const void *str,
ulint len);
/** Log an initialization of a string of bytes.
......@@ -565,11 +575,11 @@ struct mtr_t {
private:
/** Log a write of a byte string to a page.
@param b buffer page
@param block buffer page
@param offset byte offset within page
@param data data to be written
@param len length of the data, in bytes */
inline void memcpy_low(const buf_page_t &bpage, uint16_t offset,
inline void memcpy_low(const buf_block_t &block, uint16_t offset,
const void *data, size_t len);
/**
Write a log record.
......
......@@ -780,18 +780,31 @@ mtr_t::memo_contains_page_flagged(
? NULL : iteration.functor.get_block();
}
/** Mark the given latched page as modified.
@param[in] ptr pointer to within buffer frame */
void
mtr_t::memo_modify_page(const byte* ptr)
/** Find a block, preferably in MTR_MEMO_MODIFY state */
struct FindModified
{
buf_block_t* block = memo_contains_page_flagged(
ptr, MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX);
ut_ad(block != NULL);
const mtr_memo_slot_t *found= nullptr;
const buf_block_t& block;
if (!memo_contains(get_memo(), block, MTR_MEMO_MODIFY)) {
memo_push(block, MTR_MEMO_MODIFY);
}
FindModified(const buf_block_t &block) : block(block) {}
bool operator()(const mtr_memo_slot_t* slot)
{
if (slot->object != &block)
return true;
found= slot;
return slot->type != MTR_MEMO_MODIFY;
}
};
/** Mark the given latched page as modified.
@param block page that will be modified */
void mtr_t::modify(const buf_block_t &block)
{
Iterate<FindModified> iteration(block);
m_memo.for_each_block_in_reverse(iteration);
ut_ad(iteration.functor.found);
if (iteration.functor.found->type != MTR_MEMO_MODIFY)
memo_push(const_cast<buf_block_t*>(&block), MTR_MEMO_MODIFY);
}
/** Print info of an mtr handle. */
......
......@@ -1104,7 +1104,7 @@ inline void mtr_t::page_insert(const buf_block_t &block, bool reuse,
ut_ad((n_fields_s >> 1) <= REC_MAX_N_FIELDS);
ut_ad(data_l + data_c <= REDUNDANT_REC_MAX_DATA_SIZE);
set_modified();
set_modified(block);
static_assert(REC_INFO_MIN_REC_FLAG == 0x10, "compatibility");
static_assert(REC_INFO_DELETED_FLAG == 0x20, "compatibility");
......@@ -1201,7 +1201,7 @@ inline void mtr_t::page_insert(const buf_block_t &block, bool reuse,
}
#endif
set_modified();
set_modified(block);
static_assert(REC_INFO_MIN_REC_FLAG == 0x10, "compatibility");
static_assert(REC_INFO_DELETED_FLAG == 0x20, "compatibility");
......
......@@ -368,30 +368,30 @@ page_zip_dir_get(
@param[in] b ROW_FORMAT=COMPRESSED index page
@param[in] offset byte offset from b.zip.data
@param[in] len length of the data to write */
inline void mtr_t::zmemcpy(const buf_page_t &b, ulint offset, ulint len)
inline void mtr_t::zmemcpy(const buf_block_t &b, ulint offset, ulint len)
{
ut_ad(mach_read_from_2(b.zip.data + FIL_PAGE_TYPE) == FIL_PAGE_INDEX ||
mach_read_from_2(b.zip.data + FIL_PAGE_TYPE) == FIL_PAGE_RTREE);
ut_ad(page_zip_simple_validate(&b.zip));
ut_ad(offset + len <= page_zip_get_size(&b.zip));
ut_ad(mach_read_from_2(b.page.zip.data + FIL_PAGE_TYPE) == FIL_PAGE_INDEX ||
mach_read_from_2(b.page.zip.data + FIL_PAGE_TYPE) == FIL_PAGE_RTREE);
ut_ad(page_zip_simple_validate(&b.page.zip));
ut_ad(offset + len <= page_zip_get_size(&b.page.zip));
memcpy_low(b, static_cast<uint16_t>(offset), &b.zip.data[offset], len);
memcpy_low(b, static_cast<uint16_t>(offset), &b.page.zip.data[offset], len);
m_last_offset= static_cast<uint16_t>(offset + len);
}
/** Write a byte string to a ROW_FORMAT=COMPRESSED page.
@param[in,out] b ROW_FORMAT=COMPRESSED index page
@param[in] b ROW_FORMAT=COMPRESSED index page
@param[in] dest destination within b.zip.data
@param[in] str the data to write
@param[in] len length of the data to write
@tparam w write request type */
template<mtr_t::write_type w>
inline void mtr_t::zmemcpy(const buf_page_t &b, void *dest, const void *str,
inline void mtr_t::zmemcpy(const buf_block_t &b, void *dest, const void *str,
ulint len)
{
byte *d= static_cast<byte*>(dest);
const byte *s= static_cast<const byte*>(str);
ut_ad(d >= b.zip.data + FIL_PAGE_OFFSET);
ut_ad(d >= b.page.zip.data + FIL_PAGE_OFFSET);
if (w != FORCED)
{
ut_ad(len);
......@@ -409,7 +409,7 @@ inline void mtr_t::zmemcpy(const buf_page_t &b, void *dest, const void *str,
len= static_cast<ulint>(end - d);
}
::memcpy(d, s, len);
zmemcpy(b, d - b.zip.data, len);
zmemcpy(b, d - b.page.zip.data, len);
}
/** Write redo log for compressing a ROW_FORMAT=COMPRESSED index page.
......@@ -448,10 +448,10 @@ static void page_zip_compress_write_log(buf_block_t *block,
ut_a(page_zip->m_end + trailer_size <= page_zip_get_size(page_zip));
mtr->init(block);
mtr->zmemcpy(block->page, FIL_PAGE_PREV, page_zip->m_end - FIL_PAGE_PREV);
mtr->zmemcpy(*block, FIL_PAGE_PREV, page_zip->m_end - FIL_PAGE_PREV);
if (trailer_size)
mtr->zmemcpy(block->page, page_zip_get_size(page_zip) - trailer_size,
mtr->zmemcpy(*block, page_zip_get_size(page_zip) - trailer_size,
trailer_size);
block->page.status = buf_page_t::INIT_ON_FLUSH; /* because of mtr_t::init() */
}
......@@ -3639,8 +3639,7 @@ page_zip_write_rec_ext(
byte* sys = storage - sys_len * (heap_no - 1);
memcpy(sys, src, sys_len);
i++; /* skip also roll_ptr */
mtr->zmemcpy(block->page, sys - page_zip->data,
sys_len);
mtr->zmemcpy(*block, sys - page_zip->data, sys_len);
} else if (rec_offs_nth_extern(offsets, i)) {
src = rec_get_nth_field(rec, offsets,
i, &len);
......@@ -3658,7 +3657,7 @@ page_zip_write_rec_ext(
externs -= FIELD_REF_SIZE;
ut_ad(data < externs);
memcpy(externs, src, FIELD_REF_SIZE);
mtr->zmemcpy(block->page, externs - page_zip->data,
mtr->zmemcpy(*block, externs - page_zip->data,
FIELD_REF_SIZE);
}
}
......@@ -3799,7 +3798,7 @@ void page_zip_write_rec(buf_block_t *block, const byte *rec,
memcpy(sys, src, sys_len);
src += sys_len;
mtr->zmemcpy(block->page, sys - page_zip->data,
mtr->zmemcpy(*block, sys - page_zip->data,
sys_len);
/* Log the last bytes of the record. */
len = rec_offs_data_size(offsets)
......@@ -3837,13 +3836,13 @@ void page_zip_write_rec(buf_block_t *block, const byte *rec,
/* Copy the node pointer to the uncompressed area. */
byte* node_ptr = storage - REC_NODE_PTR_SIZE * (heap_no - 1);
mtr->zmemcpy<mtr_t::OPT>(block->page, node_ptr,
mtr->zmemcpy<mtr_t::OPT>(*block, node_ptr,
rec + len, REC_NODE_PTR_SIZE);
}
ut_a(!*data);
ut_ad((ulint) (data - page_zip->data) < page_zip_get_size(page_zip));
mtr->zmemcpy(block->page, page_zip->m_end,
mtr->zmemcpy(*block, page_zip->m_end,
data - page_zip->data - page_zip->m_end);
page_zip->m_end = uint16_t(data - page_zip->data);
page_zip->m_nonempty = TRUE;
......@@ -3910,7 +3909,7 @@ page_zip_write_blob_ptr(
externs -= (blob_no + 1) * BTR_EXTERN_FIELD_REF_SIZE;
field += len - BTR_EXTERN_FIELD_REF_SIZE;
mtr->zmemcpy<mtr_t::OPT>(block->page, externs, field,
mtr->zmemcpy<mtr_t::OPT>(*block, externs, field,
BTR_EXTERN_FIELD_REF_SIZE);
#ifdef UNIV_ZIP_DEBUG
......@@ -3957,7 +3956,7 @@ page_zip_write_node_ptr(
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
compile_time_assert(REC_NODE_PTR_SIZE == 4);
mach_write_to_4(field, ptr);
mtr->zmemcpy(block->page, storage, field, REC_NODE_PTR_SIZE);
mtr->zmemcpy(*block, storage, field, REC_NODE_PTR_SIZE);
}
/** Write the DB_TRX_ID,DB_ROLL_PTR into a clustered index leaf page record.
......@@ -4050,7 +4049,7 @@ page_zip_write_trx_id_and_roll_ptr(
}
} else {
write:
mtr->zmemcpy<mtr_t::OPT>(block->page, storage, field,
mtr->zmemcpy<mtr_t::OPT>(*block, storage, field,
sys_len - len);
}
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
......@@ -4172,7 +4171,7 @@ void page_zip_rec_set_deleted(buf_block_t *block, rec_t *rec, bool flag,
b|= (PAGE_ZIP_DIR_SLOT_DEL >> 8);
else
b&= byte(~(PAGE_ZIP_DIR_SLOT_DEL >> 8));
mtr->zmemcpy<mtr_t::OPT>(block->page, slot, &b, 1);
mtr->zmemcpy<mtr_t::OPT>(*block, slot, &b, 1);
#ifdef UNIV_ZIP_DEBUG
ut_a(page_zip_validate(&block->page.zip, block->frame, nullptr));
#endif /* UNIV_ZIP_DEBUG */
......@@ -4198,7 +4197,7 @@ page_zip_rec_set_owned(
b|= (PAGE_ZIP_DIR_SLOT_OWNED >> 8);
else
b&= byte(~(PAGE_ZIP_DIR_SLOT_OWNED >> 8));
mtr->zmemcpy<mtr_t::OPT>(block->page, slot, &b, 1);
mtr->zmemcpy<mtr_t::OPT>(*block, slot, &b, 1);
}
/**********************************************************************//**
......@@ -4285,7 +4284,7 @@ page_zip_dir_insert(
/* Write the entry for the inserted record.
The "owned" and "deleted" flags must be zero. */
mach_write_to_2(slot_rec - PAGE_ZIP_DIR_SLOT_SIZE, page_offset(rec));
mtr->zmemcpy(cursor->block->page, slot_rec - page_zip->data
mtr->zmemcpy(*cursor->block, slot_rec - page_zip->data
- PAGE_ZIP_DIR_SLOT_SIZE, PAGE_ZIP_DIR_SLOT_SIZE);
}
......@@ -4362,7 +4361,7 @@ void page_zip_dir_delete(buf_block_t *block, byte *rec,
/* Write the entry for the deleted record.
The "owned" and "deleted" flags will be cleared. */
mach_write_to_2(slot_free, page_offset(rec));
mtr->zmemcpy(block->page, slot_free - page_zip->data, 2);
mtr->zmemcpy(*block, slot_free - page_zip->data, 2);
if (const ulint n_ext= rec_offs_n_extern(offsets))
{
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment