Commit 2e7a0842 authored by Marko Mäkelä

MDEV-21174: Remove mlog_write_initial_log_record_fast()

Pass buf_block_t* to all functions that write redo log.
Specifically, replace the parameters page,page_zip
with buf_block_t* block in page_zip_ functions.
parent 498f84a8
......@@ -444,7 +444,7 @@ btr_page_create(
if (UNIV_LIKELY_NULL(page_zip)) {
page_create_zip(block, index, level, 0, mtr);
mach_write_to_8(index_id, index->id);
page_zip_write_header(page_zip, index_id, 8, mtr);
page_zip_write_header(block, index_id, 8, mtr);
} else {
page_create(block, mtr, dict_table_is_comp(index->table));
if (index->is_spatial()) {
......@@ -988,7 +988,7 @@ static void btr_free_root(buf_block_t *block, mtr_t *mtr, bool invalidate)
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_8(page_index_id, BTR_FREED_INDEX_ID);
page_zip_write_header(&block->page.zip, page_index_id, 8, mtr);
page_zip_write_header(block, page_index_id, 8, mtr);
}
else
mtr->write<8,mtr_t::OPT>(*block, page_index_id, BTR_FREED_INDEX_ID);
......@@ -1126,7 +1126,7 @@ btr_create(
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_create_zip(block, index, 0, 0, mtr);
mach_write_to_8(page_index_id, index_id);
page_zip_write_header(&block->page.zip, page_index_id, 8, mtr);
page_zip_write_header(block, page_index_id, 8, mtr);
static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
memset_aligned<8>(FIL_PAGE_PREV + block->page.zip.data,
0xff, 8);
......@@ -1910,9 +1910,9 @@ btr_root_raise_and_insert(
set PAGE_MAX_TRX_ID on all secondary index pages.) */
byte* p = my_assume_aligned<8>(
PAGE_HEADER + PAGE_MAX_TRX_ID + root->frame);
if (UNIV_LIKELY_NULL(root_page_zip)) {
if (UNIV_LIKELY_NULL(root->page.zip.data)) {
memset_aligned<8>(p, 0, 8);
page_zip_write_header(root_page_zip, p, 8, mtr);
page_zip_write_header(root, p, 8, mtr);
} else if (mach_read_from_8(p)) {
mtr->memset(root, PAGE_HEADER + PAGE_MAX_TRX_ID, 8, 0);
}
......@@ -1922,9 +1922,9 @@ btr_root_raise_and_insert(
the field PAGE_MAX_TRX_ID for future use. */
byte* p = my_assume_aligned<8>(
PAGE_HEADER + PAGE_MAX_TRX_ID + new_block->frame);
if (UNIV_LIKELY_NULL(new_page_zip)) {
if (UNIV_LIKELY_NULL(new_block->page.zip.data)) {
memset_aligned<8>(p, 0, 8);
page_zip_write_header(new_page_zip, p, 8, mtr);
page_zip_write_header(new_block, p, 8, mtr);
} else if (mach_read_from_8(p)) {
mtr->memset(new_block, PAGE_HEADER + PAGE_MAX_TRX_ID,
8, 0);
......
......@@ -42,7 +42,6 @@ PageBulk::init()
{
buf_block_t* new_block;
page_t* new_page;
page_zip_des_t* new_page_zip;
ulint new_page_no;
ut_ad(m_heap == NULL);
......@@ -81,22 +80,20 @@ PageBulk::init()
alloc_mtr.commit();
new_page = buf_block_get_frame(new_block);
new_page_zip = buf_block_get_page_zip(new_block);
new_page_no = page_get_page_no(new_page);
byte* index_id = PAGE_HEADER + PAGE_INDEX_ID + new_page;
if (new_page_zip) {
if (UNIV_LIKELY_NULL(new_block->page.zip.data)) {
page_create_zip(new_block, m_index, m_level, 0,
&m_mtr);
static_assert(FIL_PAGE_PREV % 8 == 0, "alignment");
memset_aligned<8>(FIL_PAGE_PREV + new_page, 0xff, 8);
page_zip_write_header(new_page_zip,
page_zip_write_header(new_block,
FIL_PAGE_PREV + new_page,
8, &m_mtr);
mach_write_to_8(index_id, m_index->id);
page_zip_write_header(new_page_zip, index_id,
8, &m_mtr);
page_zip_write_header(new_block, index_id, 8, &m_mtr);
} else {
ut_ad(!m_index->is_spatial());
page_create(new_block, &m_mtr,
......@@ -115,7 +112,6 @@ PageBulk::init()
false, &m_mtr);
new_page = buf_block_get_frame(new_block);
new_page_zip = buf_block_get_page_zip(new_block);
new_page_no = page_get_page_no(new_page);
ut_ad(m_page_no == new_page_no);
......@@ -124,15 +120,16 @@ PageBulk::init()
btr_page_set_level(new_block, m_level, &m_mtr);
}
m_page_zip = buf_block_get_page_zip(new_block);
if (!m_level && dict_index_is_sec_or_ibuf(m_index)) {
page_update_max_trx_id(new_block, new_page_zip, m_trx_id,
page_update_max_trx_id(new_block, m_page_zip, m_trx_id,
&m_mtr);
}
m_block = new_block;
m_block->skip_flush_check = true;
m_page = new_page;
m_page_zip = new_page_zip;
m_page_no = new_page_no;
m_cur_rec = page_get_infimum_rec(new_page);
ut_ad(m_is_comp == !!page_is_comp(new_page));
......
......@@ -3914,8 +3914,7 @@ static void btr_cur_upd_rec_sys(buf_block_t *block, rec_t* rec,
ut_ad(rec_offs_validate(rec, index, offsets));
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_write_trx_id_and_roll_ptr(&block->page.zip,
rec, offsets,
page_zip_write_trx_id_and_roll_ptr(block, rec, offsets,
index->db_trx_id(),
trx->id, roll_ptr, mtr);
} else {
......@@ -4088,12 +4087,11 @@ static
void
row_upd_rec_in_place(
/*=================*/
buf_block_t* block, /*!< in/out: index page */
rec_t* rec, /*!< in/out: record where replaced */
dict_index_t* index, /*!< in: the index the record belongs to */
const offset_t* offsets,/*!< in: array returned by rec_get_offsets() */
const upd_t* update, /*!< in: update vector */
page_zip_des_t* page_zip,/*!< in: compressed page with enough space
available, or NULL */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
const upd_field_t* upd_field;
......@@ -4156,8 +4154,8 @@ row_upd_rec_in_place(
dfield_get_len(new_val));
}
if (page_zip) {
page_zip_write_rec(page_zip, rec, index, offsets, 0, mtr);
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_write_rec(block, rec, index, offsets, 0, mtr);
}
}
......@@ -4170,13 +4168,11 @@ btr_cur_parse_update_in_place(
/*==========================*/
const byte* ptr, /*!< in: buffer */
const byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< in/out: page or NULL */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
buf_block_t* block, /*!< in/out: page or NULL */
dict_index_t* index, /*!< in: index corresponding to page */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint flags;
rec_t* rec;
upd_t* update;
ulint pos;
trx_id_t trx_id;
......@@ -4214,13 +4210,14 @@ btr_cur_parse_update_in_place(
ptr = row_upd_index_parse(ptr, end_ptr, heap, &update);
if (!ptr || !page) {
goto func_exit;
if (!ptr || !block) {
func_exit:
mem_heap_free(heap);
return ptr;
}
ut_a((ibool)!!page_is_comp(page) == dict_table_is_comp(index->table));
rec = page + rec_offset;
ut_a(!!page_is_comp(block->frame) == index->table->not_redundant());
rec_t* rec = block->frame + rec_offset;
/* We do not need to reserve search latch, as the page is only
being recovered, and there cannot be a hash index to it. */
......@@ -4233,13 +4230,13 @@ btr_cur_parse_update_in_place(
flags != (BTR_NO_UNDO_LOG_FLAG
| BTR_NO_LOCKING_FLAG
| BTR_KEEP_SYS_FLAG)
|| page_is_leaf(page),
|| page_is_leaf(block->frame),
ULINT_UNDEFINED, &heap);
if (flags & BTR_KEEP_SYS_FLAG) {
} else if (page_zip) {
} else if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_write_trx_id_and_roll_ptr(
page_zip, rec, offsets, pos, trx_id, roll_ptr, NULL);
block, rec, offsets, pos, trx_id, roll_ptr, mtr);
} else {
ulint len;
byte* field = rec_get_nth_field(rec, offsets, pos, &len);
......@@ -4249,12 +4246,8 @@ btr_cur_parse_update_in_place(
trx_write_roll_ptr(field + DATA_TRX_ID_LEN, roll_ptr);
}
row_upd_rec_in_place(rec, index, offsets, update, page_zip, mtr);
func_exit:
mem_heap_free(heap);
return(ptr);
row_upd_rec_in_place(block, rec, index, offsets, update, mtr);
goto func_exit;
}
/*************************************************************//**
......@@ -4447,8 +4440,7 @@ void btr_cur_upd_rec_in_place(rec_t *rec, const dict_index_t *index,
}
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_write_rec(&block->page.zip, rec, index, offsets, 0,
mtr);
page_zip_write_rec(block, rec, index, offsets, 0, mtr);
}
}
......@@ -5545,7 +5537,7 @@ void btr_rec_set_deleted(buf_block_t *block, rec_t *rec, mtr_t *mtr)
else if (UNIV_LIKELY_NULL(block->page.zip.data))
{
*b= v;
page_zip_rec_set_deleted(&block->page.zip, rec, flag, mtr);
page_zip_rec_set_deleted(block, rec, flag, mtr);
}
else
mtr->write<1>(*block, b, v);
......@@ -5574,8 +5566,7 @@ btr_cur_parse_del_mark_set_clust_rec(
/*=================================*/
const byte* ptr, /*!< in: buffer */
const byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< in/out: page or NULL */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
buf_block_t* block, /*!< in/out: page or NULL */
dict_index_t* index, /*!< in: index corresponding to page */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
......@@ -5585,10 +5576,10 @@ btr_cur_parse_del_mark_set_clust_rec(
trx_id_t trx_id;
roll_ptr_t roll_ptr;
ulint offset;
rec_t* rec;
ut_ad(!page
|| !!page_is_comp(page) == dict_table_is_comp(index->table));
ut_ad(!block
|| !!page_is_comp(block->frame)
== index->table->not_redundant());
if (end_ptr < ptr + 2) {
......@@ -5621,14 +5612,14 @@ btr_cur_parse_del_mark_set_clust_rec(
always refer to an existing undo log record. */
ut_ad(trx_id || (flags & BTR_KEEP_SYS_FLAG));
if (page) {
rec = page + offset;
if (block) {
rec_t* rec = block->frame + offset;
/* We do not need to reserve search latch, as the page
is only being recovered, and there cannot be a hash index to
it. Besides, these fields are being updated in place
and the adaptive hash index does not depend on them. */
byte* b = rec - (page_is_comp(page)
byte* b = rec - (page_is_comp(block->frame)
? REC_NEW_INFO_BITS
: REC_OLD_INFO_BITS);
......@@ -5638,8 +5629,8 @@ btr_cur_parse_del_mark_set_clust_rec(
*b &= ~REC_INFO_DELETED_FLAG;
}
if (UNIV_LIKELY_NULL(page_zip)) {
page_zip_rec_set_deleted(page_zip, rec, val, mtr);
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_rec_set_deleted(block, rec, val, mtr);
}
/* pos is the offset of DB_TRX_ID in the clustered index.
......@@ -5656,9 +5647,9 @@ btr_cur_parse_del_mark_set_clust_rec(
offset_t* offsets = rec_get_offsets(rec, index,
offsets_, true,
pos + 2, &heap);
if (page_zip) {
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_write_trx_id_and_roll_ptr(
page_zip, rec, offsets, pos, trx_id,
block, rec, offsets, pos, trx_id,
roll_ptr, mtr);
} else {
ulint len;
......@@ -5778,13 +5769,11 @@ btr_cur_parse_del_mark_set_sec_rec(
/*===============================*/
const byte* ptr, /*!< in: buffer */
const byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< in/out: page or NULL */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
buf_block_t* block, /*!< in/out: page or NULL */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint val;
ulint offset;
rec_t* rec;
if (end_ptr < ptr + 3) {
......@@ -5799,14 +5788,17 @@ btr_cur_parse_del_mark_set_sec_rec(
ut_a(offset <= srv_page_size);
if (page) {
rec = page + offset;
if (!block) {
return ptr;
}
rec_t* rec = block->frame + offset;
/* We do not need to reserve search latch, as the page
is only being recovered, and there cannot be a hash index to
it. Besides, the delete-mark flag is being updated in place
and the adaptive hash index does not depend on it. */
byte* b = page + offset - (page_is_comp(page)
byte* b = rec - (page_is_comp(block->frame)
? REC_NEW_INFO_BITS
: REC_OLD_INFO_BITS);
......@@ -5816,12 +5808,11 @@ btr_cur_parse_del_mark_set_sec_rec(
*b &= ~REC_INFO_DELETED_FLAG;
}
if (UNIV_LIKELY_NULL(page_zip)) {
page_zip_rec_set_deleted(page_zip, rec, val, mtr);
}
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_rec_set_deleted(block, rec, val, mtr);
}
return(ptr);
return ptr;
}
/*==================== B-TREE RECORD REMOVE =========================*/
......
......@@ -298,8 +298,7 @@ rtr_update_mbr_field(
ut_ad(rec_info & REC_INFO_MIN_REC_FLAG);
#endif /* UNIV_DEBUG */
memcpy(rec, node_ptr->fields[0].data, DATA_MBR_LEN);
page_zip_write_rec(page_zip, rec, index, offsets, 0,
mtr);
page_zip_write_rec(block, rec, index, offsets, 0, mtr);
} else {
mtr->memcpy(block, page_offset(rec),
node_ptr->fields[0].data, DATA_MBR_LEN);
......
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2015, 2019, MariaDB Corporation.
Copyright (c) 2015, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -55,7 +55,7 @@ void btr_page_set_level(buf_block_t *block, ulint level, mtr_t *mtr)
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_2(page_level, level);
page_zip_write_header(&block->page.zip, page_level, 2, mtr);
page_zip_write_header(block, page_level, 2, mtr);
}
else
mtr->write<2,mtr_t::OPT>(*block, page_level, level);
......@@ -71,7 +71,7 @@ inline void btr_page_set_next(buf_block_t *block, ulint next, mtr_t *mtr)
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_4(fil_page_next, next);
page_zip_write_header(&block->page.zip, fil_page_next, 4, mtr);
page_zip_write_header(block, fil_page_next, 4, mtr);
}
else
mtr->write<4>(*block, fil_page_next, next);
......@@ -87,7 +87,7 @@ inline void btr_page_set_prev(buf_block_t *block, ulint prev, mtr_t *mtr)
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_4(fil_page_prev, prev);
page_zip_write_header(&block->page.zip, fil_page_prev, 4, mtr);
page_zip_write_header(block, fil_page_prev, 4, mtr);
}
else
mtr->write<4>(*block, fil_page_prev, prev);
......
......@@ -564,8 +564,7 @@ btr_cur_parse_update_in_place(
/*==========================*/
const byte* ptr, /*!< in: buffer */
const byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< in/out: page or NULL */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
buf_block_t* block, /*!< in/out: page or NULL */
dict_index_t* index, /*!< in: index corresponding to page */
mtr_t* mtr); /*!< in/out: mini-transaction */
/****************************************************************//**
......@@ -578,8 +577,7 @@ btr_cur_parse_del_mark_set_clust_rec(
/*=================================*/
const byte* ptr, /*!< in: buffer */
const byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< in/out: page or NULL */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
buf_block_t* block, /*!< in/out: page or NULL */
dict_index_t* index, /*!< in: index corresponding to page */
mtr_t* mtr); /*!< in/out: mini-transaction */
/****************************************************************//**
......@@ -592,8 +590,7 @@ btr_cur_parse_del_mark_set_sec_rec(
/*===============================*/
const byte* ptr, /*!< in: buffer */
const byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< in/out: page or NULL */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
buf_block_t* block, /*!< in/out: page or NULL */
mtr_t* mtr); /*!< in/out: mini-transaction */
/** Estimates the number of rows in a given index range.
......
......@@ -161,22 +161,6 @@ mlog_write_initial_log_record_low(
mtr_t* mtr);
/********************************************************//**
Writes the initial part of a log record (3..11 bytes).
If the implementation of this function is changed, all
size parameters to mlog_open() should be adjusted accordingly!
@return new value of log_ptr */
UNIV_INLINE
byte*
mlog_write_initial_log_record_fast(
/*===============================*/
const byte* ptr, /*!< in: pointer to (inside) a buffer
frame holding the file page where
modification is made */
mlog_id_t type, /*!< in: log item type: MLOG_1BYTE, ... */
byte* log_ptr,/*!< in: pointer to mtr log which has
been opened */
mtr_t* mtr); /*!< in: mtr */
/********************************************************//**
Parses an initial log record written by mlog_write_initial_log_record_low().
@return parsed record end, NULL if not a complete record */
const byte*
......
......@@ -149,49 +149,3 @@ mlog_write_initial_log_record_low(
mtr->added_rec();
return(log_ptr);
}
/********************************************************//**
Writes the initial part of a log record (3..11 bytes).
If the implementation of this function is changed, all
size parameters to mlog_open() should be adjusted accordingly!
@return new value of log_ptr */
UNIV_INLINE
byte*
mlog_write_initial_log_record_fast(
/*===============================*/
const byte* ptr, /*!< in: pointer to (inside) a buffer
frame holding the file page where
modification is made */
mlog_id_t type, /*!< in: log item type: MLOG_1BYTE, ... */
byte* log_ptr,/*!< in: pointer to mtr log which has
been opened */
mtr_t* mtr) /*!< in/out: mtr */
{
const byte* page;
ulint space;
ulint offset;
ut_ad(log_ptr);
ut_d(mtr->memo_modify_page(ptr));
page = (const byte*) ut_align_down(ptr, srv_page_size);
space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
offset = mach_read_from_4(page + FIL_PAGE_OFFSET);
/* check whether the page is in the doublewrite buffer;
the doublewrite buffer is located in pages
FSP_EXTENT_SIZE, ..., 3 * FSP_EXTENT_SIZE - 1 in the
system tablespace */
if (space == TRX_SYS_SPACE
&& offset >= FSP_EXTENT_SIZE && offset < 3 * FSP_EXTENT_SIZE) {
ut_ad(buf_dblwr_being_created);
/* Do nothing: we only come to this branch in an
InnoDB database creation. We do not redo log
anything for the doublewrite buffer pages. */
return(log_ptr);
}
return(mlog_write_initial_log_record_low(type, space, offset,
log_ptr, mtr));
}
......@@ -96,7 +96,7 @@ page_set_ssn_id(
byte* ssn = block->frame + FIL_RTREE_SPLIT_SEQ_NUM;
if (UNIV_LIKELY_NULL(page_zip)) {
mach_write_to_8(ssn, ssn_id);
page_zip_write_header(page_zip, ssn, 8, mtr);
page_zip_write_header(block, ssn, 8, mtr);
} else {
mtr->write<8,mtr_t::OPT>(*block, ssn, ssn_id);
}
......@@ -138,7 +138,7 @@ inline void page_header_reset_last_insert(buf_block_t *block, mtr_t *mtr)
if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_2(b, 0);
page_zip_write_header(&block->page.zip, b, 2, mtr);
page_zip_write_header(block, b, 2, mtr);
}
else
mtr->write<2,mtr_t::OPT>(*block, b, 0U);
......
......@@ -237,21 +237,21 @@ UNIV_INLINE
void
page_zip_write_header(
/*==================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page */
buf_block_t* block, /*!< in/out: compressed page */
const byte* str, /*!< in: address on the uncompressed page */
ulint length, /*!< in: length of the data */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
MY_ATTRIBUTE((nonnull(1,2)));
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((nonnull));
/** Write an entire record to the ROW_FORMAT=COMPRESSED page.
The data must already have been written to the uncompressed page.
@param[in,out] page_zip ROW_FORMAT=COMPRESSED page
@param[in,out] block ROW_FORMAT=COMPRESSED page
@param[in] rec record in the uncompressed page
@param[in] index the index that the page belongs to
@param[in] offsets rec_get_offsets(rec, index)
@param[in] create nonzero=insert, zero=update
@param[in,out] mtr mini-transaction */
void page_zip_write_rec(page_zip_des_t *page_zip, const byte *rec,
void page_zip_write_rec(buf_block_t *block, const byte *rec,
const dict_index_t *index, const offset_t *offsets,
ulint create, mtr_t *mtr)
MY_ATTRIBUTE((nonnull));
......@@ -308,23 +308,23 @@ page_zip_write_node_ptr(
MY_ATTRIBUTE((nonnull));
/** Write the DB_TRX_ID,DB_ROLL_PTR into a clustered index leaf page record.
@param[in,out] page_zip compressed page
@param[in,out] block ROW_FORMAT=COMPRESSED page
@param[in,out] rec record
@param[in] offsets rec_get_offsets(rec, index)
@param[in] trx_id_field field number of DB_TRX_ID (number of PK fields)
@param[in] trx_id DB_TRX_ID value (transaction identifier)
@param[in] roll_ptr DB_ROLL_PTR value (undo log pointer)
@param[in,out] mtr mini-transaction, or NULL to skip logging */
@param[in,out] mtr mini-transaction */
void
page_zip_write_trx_id_and_roll_ptr(
page_zip_des_t* page_zip,
buf_block_t* block,
byte* rec,
const offset_t* offsets,
ulint trx_id_col,
trx_id_t trx_id,
roll_ptr_t roll_ptr,
mtr_t* mtr)
MY_ATTRIBUTE((nonnull(1,2,3)));
MY_ATTRIBUTE((nonnull));
/** Parse a MLOG_ZIP_WRITE_TRX_ID record.
@param[in] ptr redo log buffer
......@@ -348,24 +348,12 @@ already have been written on the uncompressed page. */
void
page_zip_rec_set_deleted(
/*=====================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page */
buf_block_t* block, /*!< in/out: ROW_FORMAT=COMPRESSED page */
const byte* rec, /*!< in: record on the uncompressed page */
ulint flag, /*!< in: the deleted flag (nonzero=TRUE) */
mtr_t* mtr) /*!< in,out: mini-transaction */
MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Write the "owned" flag of a record on a compressed page. The n_owned field
must already have been written on the uncompressed page. */
void
page_zip_rec_set_owned(
/*===================*/
buf_block_t* block, /*!< in/out: ROW_FORMAT=COMPRESSED page */
const byte* rec, /*!< in: record on the uncompressed page */
ulint flag, /*!< in: the owned flag (nonzero=TRUE) */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Insert a record to the dense page directory. */
void
......@@ -378,19 +366,17 @@ page_zip_dir_insert(
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((nonnull(1,3,4)));
/**********************************************************************//**
Shift the dense page directory and the array of BLOB pointers
when a record is deleted. */
void
page_zip_dir_delete(
/*================*/
page_zip_des_t* page_zip, /*!< in/out: compressed page */
byte* rec, /*!< in: deleted record */
const dict_index_t* index, /*!< in: index of rec */
const offset_t* offsets, /*!< in: rec_get_offsets(rec) */
const byte* free, /*!< in: previous start of
the free list */
mtr_t* mtr) /*!< in/out: mini-transaction */
/** Shift the dense page directory and the array of BLOB pointers
when a record is deleted.
@param[in,out] block index page
@param[in,out] rec record being deleted
@param[in] index the index that the page belongs to
@param[in] offsets rec_get_offsets(rec, index)
@param[in] free previous start of the free list
@param[in,out] mtr mini-transaction */
void page_zip_dir_delete(buf_block_t *block, byte *rec,
const dict_index_t *index, const offset_t *offsets,
const byte *free, mtr_t *mtr)
MY_ATTRIBUTE((nonnull(1,2,3,4,6)));
/***********************************************************//**
......
......@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -329,30 +329,33 @@ UNIV_INLINE
void
page_zip_write_header(
/*==================*/
page_zip_des_t* page_zip,/*!< in/out: compressed page */
buf_block_t* block, /*!< in/out: compressed page */
const byte* str, /*!< in: address on the uncompressed page */
ulint length, /*!< in: length of the data */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
{
ulint pos;
ut_ad(page_align(str) == block->frame);
ut_ad(page_zip_simple_validate(page_zip));
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
pos = page_offset(str);
const uint16_t pos = page_offset(str);
ut_ad(pos < PAGE_DATA);
ut_ad(pos + length < PAGE_DATA);
page_zip_des_t* page_zip = &block->page.zip;
ut_ad(page_zip_simple_validate(page_zip));
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
memcpy(page_zip->data + pos, str, length);
/* The following would fail in page_cur_insert_rec_zip(). */
/* ut_ad(page_zip_validate(page_zip, str - pos)); */
if (!mtr) {
} else if (byte* log_ptr = mlog_open(mtr, 11 + 2 + 2 + length)) {
log_ptr = mlog_write_initial_log_record_fast(
str, MLOG_ZIP_WRITE_STRING, log_ptr, mtr);
if (byte* log_ptr = mlog_open(mtr, 11 + 2 + 2 + length)) {
log_ptr = mlog_write_initial_log_record_low(
MLOG_ZIP_WRITE_STRING,
block->page.id.space(), block->page.id.page_no(),
log_ptr, mtr);
mach_write_to_2(log_ptr, pos);
mach_write_to_2(log_ptr + 2, length);
memcpy(log_ptr + 4, str, length);
......
......@@ -1620,17 +1620,17 @@ recv_parse_or_apply_log_rec_body(
ptr, end_ptr,
type == MLOG_COMP_REC_CLUST_DELETE_MARK,
&index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ut_a(!block
|| !!page_is_comp(block->frame)
== index->table->not_redundant());
ptr = btr_cur_parse_del_mark_set_clust_rec(
ptr, end_ptr, page, page_zip, index, mtr);
ptr, end_ptr, block, index, mtr);
}
break;
case MLOG_REC_SEC_DELETE_MARK:
ut_ad(!page || fil_page_type_is_index(page_type));
ptr = btr_cur_parse_del_mark_set_sec_rec(ptr, end_ptr,
page, page_zip, mtr);
block, mtr);
break;
case MLOG_REC_UPDATE_IN_PLACE: case MLOG_COMP_REC_UPDATE_IN_PLACE:
if (!page_zip) {
......@@ -1649,8 +1649,8 @@ recv_parse_or_apply_log_rec_body(
ut_a(!page
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = btr_cur_parse_update_in_place(ptr, end_ptr, page,
page_zip, index,
ptr = btr_cur_parse_update_in_place(ptr, end_ptr,
block, index,
mtr);
}
break;
......
......@@ -1000,7 +1000,7 @@ inline void page_direction_reset(buf_block_t *block, byte *ptr, mtr_t *mtr)
{
*ptr= PAGE_NO_DIRECTION; /* no instant ALTER bits */
memset_aligned<2>(ptr + 1, 0, 2);
page_zip_write_header(&block->page.zip, ptr, 3, mtr);
page_zip_write_header(block, ptr, 3, mtr);
}
else
{
......@@ -1028,7 +1028,7 @@ inline void page_direction_increment(buf_block_t *block, byte *ptr, uint dir,
{
*ptr= static_cast<byte>(dir);
mach_write_to_2(ptr + 1, 1 + mach_read_from_2(ptr + 1));
page_zip_write_header(&block->page.zip, ptr, 3, mtr);
page_zip_write_header(block, ptr, 3, mtr);
}
else
{
......@@ -1098,7 +1098,7 @@ static void page_dir_split_slot(buf_block_t *block, ulint s, mtr_t* mtr)
/* Log changes to the compressed page header and the dense page
directory. */
mach_write_to_2(n_slots_p, n_slots + 1);
page_zip_write_header(&block->page.zip, n_slots_p, 2, mtr);
page_zip_write_header(block, n_slots_p, 2, mtr);
mach_write_to_2(slot, page_offset(rec));
page_rec_set_n_owned<true>(block, page_dir_slot_get_rec(slot), half_owned,
true, mtr);
......@@ -1146,7 +1146,6 @@ static void page_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr)
page_dir_slot_t* slot = page_dir_get_nth_slot(block->frame, s);
page_dir_slot_t* up_slot = slot - PAGE_DIR_SLOT_SIZE;
const ulint up_n_owned = page_dir_slot_get_n_owned(up_slot);
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
ut_ad(page_dir_slot_get_n_owned(slot)
== PAGE_DIR_SLOT_MIN_N_OWNED - 1);
......@@ -1165,11 +1164,11 @@ static void page_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr)
block->frame, n_slots - 1);
memmove_aligned<2>(last_slot + PAGE_DIR_SLOT_SIZE, last_slot,
slot - last_slot);
if (UNIV_LIKELY_NULL(page_zip)) {
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
memset_aligned<2>(last_slot, 0, 2);
mach_write_to_2(PAGE_N_DIR_SLOTS + PAGE_HEADER
+ block->frame, n_slots - 1);
page_zip_write_header(page_zip,
page_zip_write_header(block,
PAGE_N_DIR_SLOTS + PAGE_HEADER
+ block->frame, 2, mtr);
} else {
......@@ -1197,7 +1196,7 @@ static void page_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr)
page_rec_set_n_owned<true>(block, new_rec,
PAGE_DIR_SLOT_MIN_N_OWNED,
true, mtr);
if (UNIV_LIKELY_NULL(page_zip)) {
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
mach_write_to_2(slot, page_offset(new_rec));
goto func_exit;
}
......@@ -1250,7 +1249,7 @@ static byte* page_mem_alloc_heap(buf_block_t *block, ulint need,
if (compressed)
{
ut_ad(h & 0x8000);
page_zip_write_header(&block->page.zip, heap_top, 4, mtr);
page_zip_write_header(block, heap_top, 4, mtr);
}
else
mtr->memcpy(*block, PAGE_HEAP_TOP + PAGE_HEADER, 4);
......@@ -1859,7 +1858,7 @@ page_cur_insert_rec_zip(
ut_ad(mach_read_from_2(garbage) >= rec_size);
mach_write_to_2(garbage, mach_read_from_2(garbage) - rec_size);
compile_time_assert(PAGE_GARBAGE == PAGE_FREE + 2);
page_zip_write_header(page_zip,
page_zip_write_header(cursor->block,
PAGE_HEADER + PAGE_FREE + page, 4, mtr);
/* TODO: group with PAGE_LAST_INSERT */
......@@ -1941,7 +1940,7 @@ page_cur_insert_rec_zip(
(insert_rec - cursor->rec));
byte* n_recs = PAGE_N_RECS + PAGE_HEADER + page;
mach_write_to_2(n_recs, mach_read_from_2(n_recs) + 1);
page_zip_write_header(page_zip, n_recs, 2, mtr);
page_zip_write_header(cursor->block, n_recs, 2, mtr);
/* 5. Set the n_owned field in the inserted record to zero,
and set the heap_no field */
......@@ -1963,7 +1962,7 @@ page_cur_insert_rec_zip(
== rec_get_node_ptr_flag(insert_rec));
/* TODO: combine with PAGE_DIRECTION changes */
mach_write_to_2(last_insert, page_offset(insert_rec));
page_zip_write_header(page_zip, last_insert, 2, mtr);
page_zip_write_header(cursor->block, last_insert, 2, mtr);
if (!index->is_spatial()) {
byte* ptr = PAGE_HEADER + PAGE_DIRECTION_B + page;
......@@ -2004,7 +2003,7 @@ page_cur_insert_rec_zip(
}
}
page_zip_write_rec(page_zip, insert_rec, index, offsets, 1, mtr);
page_zip_write_rec(cursor->block, insert_rec, index, offsets, 1, mtr);
return insert_rec;
}
......@@ -2319,7 +2318,7 @@ static void page_mem_free(buf_block_t *block, rec_t *rec,
const rec_t *free= page_header_get_ptr(block->frame, PAGE_FREE);
if (UNIV_LIKELY_NULL(block->page.zip.data))
page_zip_dir_delete(&block->page.zip, rec, index, offsets, free, mtr);
page_zip_dir_delete(block, rec, index, offsets, free, mtr);
else
{
if (srv_immediate_scrub_data_uncompressed)
......
......@@ -205,7 +205,7 @@ page_set_max_trx_id(
if (UNIV_LIKELY_NULL(page_zip))
{
mach_write_to_8(max_trx_id, trx_id);
page_zip_write_header(page_zip, max_trx_id, 8, mtr);
page_zip_write_header(block, max_trx_id, 8, mtr);
}
else
mtr->write<8>(*block, max_trx_id, trx_id);
......@@ -233,10 +233,10 @@ page_set_autoinc(
ib_uint64_t old= mach_read_from_8(field);
if (old == autoinc || (old > autoinc && !reset))
/* nothing to update */;
else if (page_zip_des_t* page_zip = buf_block_get_page_zip(block))
else if (UNIV_LIKELY_NULL(block->page.zip.data))
{
mach_write_to_8(field, autoinc);
page_zip_write_header(page_zip, field, 8, mtr);
page_zip_write_header(block, field, 8, mtr);
}
else
mtr->write<8>(*block, field, autoinc);
......@@ -981,7 +981,7 @@ page_delete_rec_list_end(
ut_ad(page_is_comp(block->frame));
memset(last_insert, 0, 2);
page_zip_write_header(page_zip, last_insert, 2, mtr);
page_zip_write_header(block, last_insert, 2, mtr);
do {
page_cur_t cur;
......
This diff is collapsed.
......@@ -1801,7 +1801,7 @@ PageConverter::adjust_cluster_record(
if (UNIV_LIKELY_NULL(m_rec_iter.current_block()
->page.zip.data)) {
page_zip_write_trx_id_and_roll_ptr(
&m_rec_iter.current_block()->page.zip,
m_rec_iter.current_block(),
rec, m_offsets, trx_id_pos,
0, roll_ptr_t(1) << ROLL_PTR_INSERT_FLAG_POS,
&m_rec_iter.m_mtr);
......
/*****************************************************************************
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2019, MariaDB Corporation.
Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -693,11 +693,10 @@ static void row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr)
rec, index, offsets)));
index->set_modified(*mtr);
if (page_zip_des_t* page_zip
= buf_block_get_page_zip(
btr_pcur_get_block(&node->pcur))) {
buf_block_t* block = btr_pcur_get_block(&node->pcur);
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_write_trx_id_and_roll_ptr(
page_zip, rec, offsets, trx_id_pos,
block, rec, offsets, trx_id_pos,
0, 1ULL << ROLL_PTR_INSERT_FLAG_POS,
mtr);
} else {
......@@ -705,8 +704,6 @@ static void row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr)
byte* ptr = rec_get_nth_field(
rec, offsets, trx_id_pos, &len);
ut_ad(len == DATA_TRX_ID_LEN);
buf_block_t* block = btr_pcur_get_block(
&node->pcur);
uint16_t offs = page_offset(ptr);
mtr->memset(block, offs, DATA_TRX_ID_LEN, 0);
offs += DATA_TRX_ID_LEN;
......
......@@ -496,14 +496,13 @@ row_undo_mod_clust(
rec, dict_table_is_comp(node->table))
|| rec_is_alter_metadata(rec, *index));
index->set_modified(mtr);
if (page_zip_des_t* page_zip = buf_block_get_page_zip(
btr_pcur_get_block(pcur))) {
buf_block_t* block = btr_pcur_get_block(pcur);
if (UNIV_LIKELY_NULL(block->page.zip.data)) {
page_zip_write_trx_id_and_roll_ptr(
page_zip, rec, offsets, trx_id_pos,
block, rec, offsets, trx_id_pos,
0, 1ULL << ROLL_PTR_INSERT_FLAG_POS,
&mtr);
} else {
buf_block_t* block = btr_pcur_get_block(pcur);
uint16_t offs = page_offset(rec
+ trx_id_offset);
mtr.memset(block, offs, DATA_TRX_ID_LEN, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment