Commit fc876980 authored by Marko Mäkelä

MDEV-12353: Write less log for BLOB pages

fsp_page_create(): Always initialize the page. The logic to
avoid initialization was made redundant and should have been removed
in mysql/mysql-server@ce0a1e85e24e48b8171f767b44330da635a6ea0a
(MySQL 5.7.5).

btr_store_big_rec_extern_fields(): Remove the redundant initialization
of FIL_PAGE_PREV and FIL_PAGE_NEXT. An INIT_PAGE record will have
been written already. Only write the ROW_FORMAT=COMPRESSED page payload
from FIL_PAGE_DATA onwards. We were unnecessarily writing from
FIL_PAGE_TYPE onwards, which caused an assertion failure on recovery:

	recv_sys_t::alloc(size_t): Assertion 'len <= srv_page_size' failed

when running the following tests:

	./mtr --no-reorder innodb_zip.blob,4k innodb_zip.bug56680,4k
parent 5874aac7
......@@ -505,10 +505,7 @@ btr_page_alloc_for_ibuf(
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
@retval NULL if no page could be allocated
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
@retval NULL if no page could be allocated */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
buf_block_t*
btr_page_alloc_low(
......@@ -523,10 +520,7 @@ btr_page_alloc_low(
for the allocation */
mtr_t* init_mtr) /*!< in/out: mtr or another
mini-transaction in which the
page should be initialized.
If init_mtr!=mtr, but the page
is already X-latched in mtr, do
not initialize the page. */
page should be initialized. */
{
page_t* root = btr_root_get(index, mtr);
......@@ -541,7 +535,7 @@ btr_page_alloc_low(
buf_block_t* block = fseg_alloc_free_page_general(
seg_header, hint_page_no, file_direction,
TRUE, mtr, init_mtr);
true, mtr, init_mtr);
#ifdef UNIV_DEBUG_SCRUBBING
if (block != NULL) {
......@@ -565,10 +559,7 @@ btr_page_alloc_low(
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
@retval NULL if no page could be allocated
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
@retval NULL if no page could be allocated */
buf_block_t*
btr_page_alloc(
/*===========*/
......
......@@ -7215,7 +7215,6 @@ btr_store_big_rec_extern_fields(
ulint hint_page_no;
ulint i;
mtr_t mtr;
mtr_t mtr_bulk;
mem_heap_t* heap = NULL;
page_zip_des_t* page_zip;
z_stream c_stream;
......@@ -7345,35 +7344,19 @@ btr_store_big_rec_extern_fields(
hint_page_no = prev_page_no + 1;
}
mtr_t *alloc_mtr;
if (UNIV_UNLIKELY(op == BTR_STORE_INSERT_BULK)) {
mtr_bulk.start();
mtr_bulk.set_spaces(mtr);
alloc_mtr = &mtr_bulk;
} else {
alloc_mtr = &mtr;
}
if (!fsp_reserve_free_extents(&r_extents,
index->table->space, 1,
FSP_BLOB, alloc_mtr,
1)) {
alloc_mtr->commit();
FSP_BLOB, &mtr, 1)) {
mtr.commit();
error = DB_OUT_OF_FILE_SPACE;
goto func_exit;
}
block = btr_page_alloc(index, hint_page_no, FSP_NO_DIR,
0, alloc_mtr, &mtr);
0, &mtr, &mtr);
index->table->space->release_free_extents(r_extents);
if (UNIV_UNLIKELY(op == BTR_STORE_INSERT_BULK)) {
mtr_bulk.commit();
}
ut_a(block != NULL);
page_no = block->page.id.page_no();
......@@ -7411,14 +7394,20 @@ btr_store_big_rec_extern_fields(
row_log_table_blob_alloc(index, page_no);
}
ut_ad(!page_has_siblings(block->frame));
ut_ad(!fil_page_get_type(block->frame));
if (page_zip) {
int err;
page_zip_des_t* blob_page_zip;
mach_write_to_2(block->frame + FIL_PAGE_TYPE,
prev_page_no == FIL_NULL
? FIL_PAGE_TYPE_ZBLOB
: FIL_PAGE_TYPE_ZBLOB2);
mtr.write<1>(*block,
FIL_PAGE_TYPE + 1 + block->frame,
prev_page_no == FIL_NULL
? FIL_PAGE_TYPE_ZBLOB
: FIL_PAGE_TYPE_ZBLOB2);
block->page.zip.data[FIL_PAGE_TYPE + 1]
= block->frame[FIL_PAGE_TYPE + 1];
c_stream.next_out = block->frame
+ FIL_PAGE_DATA;
......@@ -7430,22 +7419,11 @@ btr_store_big_rec_extern_fields(
ut_a(err == Z_STREAM_END
|| c_stream.avail_out == 0);
compile_time_assert(FIL_PAGE_NEXT
== FIL_PAGE_PREV + 4);
compile_time_assert(FIL_NULL == 0xffffffff);
mtr.memset(block, FIL_PAGE_PREV, 8, 0xff);
mtr.memcpy(*block,
FIL_PAGE_TYPE,
FIL_PAGE_DATA,
page_zip_get_size(page_zip)
- FIL_PAGE_TYPE
- FIL_PAGE_DATA
- c_stream.avail_out);
/* Zero out the unused part of the page. */
if (c_stream.avail_out) {
mtr.memset(block,
page_zip_get_size(page_zip)
- c_stream.avail_out,
c_stream.avail_out, 0);
}
/* Copy the page to compressed storage,
because it will be flushed to disk
from there. */
......@@ -7505,7 +7483,7 @@ btr_store_big_rec_extern_fields(
break;
}
} else {
mtr.write<2>(*block, FIL_PAGE_TYPE
mtr.write<1>(*block, FIL_PAGE_TYPE + 1
+ block->frame,
FIL_PAGE_TYPE_BLOB);
......
......@@ -104,14 +104,8 @@ direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
which the page should be initialized. If init_mtr != mtr, but the page is
already latched in mtr, do not initialize the page
@param[in] has_done_reservation TRUE if the space has already been
reserved, in this case we will never return NULL
@retval NULL if no page could be allocated
@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
which the page should be initialized.
@retval NULL if no page could be allocated */
static
buf_block_t*
fseg_alloc_free_page_low(
......@@ -121,12 +115,12 @@ fseg_alloc_free_page_low(
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr
#ifdef UNIV_DEBUG
, ibool has_done_reservation
bool has_done_reservation,
/*!< whether the space has already been reserved */
#endif /* UNIV_DEBUG */
)
mtr_t* mtr,
mtr_t* init_mtr)
MY_ATTRIBUTE((warn_unused_result));
/** Get the tablespace header block, SX-latched
......@@ -1063,16 +1057,12 @@ fsp_alloc_from_free_frag(buf_block_t *header, buf_block_t *xdes, xdes_t *descr,
}
/** Gets a buffer block for an allocated page.
NOTE: If init_mtr != mtr, the block will only be initialized if it was
not previously x-latched. It is assumed that the block has been
x-latched only by mtr, and freed in mtr in that case.
@param[in,out] space tablespace
@param[in] offset page number of the allocated page
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction of the allocation
@param[in,out] init_mtr mini-transaction for initializing the page
@return block, initialized if init_mtr==mtr
or rw_lock_x_lock_count(&block->lock) == 1 */
@return block, initialized */
static
buf_block_t*
fsp_page_create(
......@@ -1085,34 +1075,21 @@ fsp_page_create(
buf_block_t* block = buf_page_create(page_id_t(space->id, offset),
space->zip_size(), init_mtr);
ut_d(bool latched = mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_SX_FIX));
ut_ad(rw_latch == RW_X_LATCH || rw_latch == RW_SX_LATCH);
/* Mimic buf_page_get(), but avoid the buf_pool->page_hash lookup. */
mtr_memo_type_t memo;
if (rw_latch == RW_X_LATCH) {
rw_lock_x_lock(&block->lock);
memo = MTR_MEMO_PAGE_X_FIX;
} else {
ut_ad(rw_latch == RW_SX_LATCH);
rw_lock_sx_lock(&block->lock);
memo = MTR_MEMO_PAGE_SX_FIX;
}
mtr_memo_push(init_mtr, block, memo);
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
mtr_memo_push(init_mtr, block, rw_latch == RW_X_LATCH
? MTR_MEMO_PAGE_X_FIX : MTR_MEMO_PAGE_SX_FIX);
if (init_mtr == mtr
|| (rw_latch == RW_X_LATCH
? rw_lock_get_x_lock_count(&block->lock) == 1
: rw_lock_get_sx_lock_count(&block->lock) == 1)) {
/* Initialize the page, unless it was already
SX-latched in mtr. (In this case, we would want to
allocate another page that has not been freed in mtr.) */
ut_ad(init_mtr == mtr || !latched);
fsp_init_file_page(space, block, init_mtr);
}
fsp_init_file_page(space, block, init_mtr);
return(block);
}
......@@ -1125,10 +1102,7 @@ The page is marked as used.
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mini-transaction in which the page should be
initialized (may be the same as mtr)
@retval NULL if no page could be allocated
@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
@retval NULL if no page could be allocated */
static MY_ATTRIBUTE((warn_unused_result, nonnull))
buf_block_t*
fsp_alloc_free_page(
......@@ -1799,11 +1773,10 @@ fseg_create(
block = fseg_alloc_free_page_low(space,
inode, iblock, 0, FSP_UP,
RW_SX_LATCH,
mtr, mtr
#ifdef UNIV_DEBUG
, has_done_reservation
has_done_reservation,
#endif /* UNIV_DEBUG */
);
mtr, mtr);
/* The allocation cannot fail if we have already reserved a
space for the page. */
......@@ -1966,10 +1939,7 @@ not yet taken off it!
@param[out] xdes extent descriptor page
@param[in,out] space tablespace
@param[in,out] mtr mini-transaction
@retval NULL if no page could be allocated
@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
@retval NULL if no page could be allocated */
static
xdes_t*
fseg_alloc_free_extent(
......@@ -2033,14 +2003,8 @@ direction they go alphabetically: FSP_DOWN, FSP_UP, FSP_NO_DIR
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@param[in,out] init_mtr mtr or another mini-transaction in
which the page should be initialized. If init_mtr != mtr, but the page is
already latched in mtr, do not initialize the page
@param[in] has_done_reservation TRUE if the space has already been
reserved, in this case we will never return NULL
@retval NULL if no page could be allocated
@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
which the page should be initialized.
@retval NULL if no page could be allocated */
static
buf_block_t*
fseg_alloc_free_page_low(
......@@ -2050,12 +2014,12 @@ fseg_alloc_free_page_low(
ulint hint,
byte direction,
rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr
#ifdef UNIV_DEBUG
, ibool has_done_reservation
bool has_done_reservation,
/*!< whether the space has already been reserved */
#endif /* UNIV_DEBUG */
)
mtr_t* mtr,
mtr_t* init_mtr)
{
ib_id_t seg_id;
ulint used;
......@@ -2276,10 +2240,7 @@ fseg_alloc_free_page_low(
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
fragmentation.
@retval NULL if no page could be allocated
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
@retval NULL if no page could be allocated */
buf_block_t*
fseg_alloc_free_page_general(
/*=========================*/
......@@ -2291,16 +2252,14 @@ fseg_alloc_free_page_general(
inserted there in order, into which
direction they go alphabetically: FSP_DOWN,
FSP_UP, FSP_NO_DIR */
ibool has_done_reservation, /*!< in: TRUE if the caller has
bool has_done_reservation, /*!< in: true if the caller has
already done the reservation for the page
with fsp_reserve_free_extents, then there
is no need to do the check for this individual
page */
mtr_t* mtr, /*!< in/out: mini-transaction */
mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction
in which the page should be initialized.
If init_mtr!=mtr, but the page is already
latched in mtr, do not initialize the page. */
in which the page should be initialized. */
{
fseg_inode_t* inode;
ulint space_id;
......@@ -2325,11 +2284,11 @@ fseg_alloc_free_page_general(
block = fseg_alloc_free_page_low(space,
inode, iblock, hint, direction,
RW_X_LATCH, mtr, init_mtr
RW_X_LATCH,
#ifdef UNIV_DEBUG
, has_done_reservation
has_done_reservation,
#endif /* UNIV_DEBUG */
);
mtr, init_mtr);
/* The allocation cannot fail if we have already reserved a
space for the page. */
......
......@@ -601,10 +601,7 @@ btr_get_size_and_reserved(
/**************************************************************//**
Allocates a new file page to be used in an index tree. NOTE: we assume
that the caller has made the reservation for free extents!
@retval NULL if no page could be allocated
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
@retval NULL if no page could be allocated */
buf_block_t*
btr_page_alloc(
/*===========*/
......
......@@ -409,15 +409,12 @@ file space fragmentation.
@return X-latched block, or NULL if no page could be allocated */
#define fseg_alloc_free_page(seg_header, hint, direction, mtr) \
fseg_alloc_free_page_general(seg_header, hint, direction, \
FALSE, mtr, mtr)
false, mtr, mtr)
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
fragmentation.
@retval NULL if no page could be allocated
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
@retval NULL if no page could be allocated */
buf_block_t*
fseg_alloc_free_page_general(
/*=========================*/
......@@ -429,16 +426,14 @@ fseg_alloc_free_page_general(
inserted there in order, into which
direction they go alphabetically: FSP_DOWN,
FSP_UP, FSP_NO_DIR */
ibool has_done_reservation, /*!< in: TRUE if the caller has
bool has_done_reservation, /*!< in: true if the caller has
already done the reservation for the page
with fsp_reserve_free_extents, then there
is no need to do the check for this individual
page */
mtr_t* mtr, /*!< in/out: mini-transaction */
mtr_t* init_mtr)/*!< in/out: mtr or another mini-transaction
in which the page should be initialized.
If init_mtr!=mtr, but the page is already
latched in mtr, do not initialize the page. */
in which the page should be initialized. */
MY_ATTRIBUTE((warn_unused_result, nonnull));
/** Reserves free pages from a tablespace. All mini-transactions which may
......
......@@ -553,7 +553,7 @@ buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr)
new_block = fseg_alloc_free_page_general(
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
+ header_block->frame,
undo->top_page_no + 1, FSP_UP, TRUE, mtr, mtr);
undo->top_page_no + 1, FSP_UP, true, mtr, mtr);
rseg->space->release_free_extents(n_reserved);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment