Commit edb7de5f authored by marko

branches/zip: On compressed pages, always update the insert buffer bitmap.

Do not assume anything about the contents of the bitmap.

ibuf_update_free_bits_low(): Use this function only for uncompressed pages.
Remove the parameter zip_size.  When the free bits do not change, this
function avoids latching and updating the bitmap page.
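For illustration, the uncompressed-page path keeps the before/after
comparison, roughly as in the corresponding hunk below (sketch only):

	before = ibuf_index_page_calc_free_bits(0, max_ins_size);
	after = ibuf_index_page_calc_free(0, block);

	if (before != after) {
		/* latch the bitmap page and write the bits only
		when they actually change */
		ibuf_set_free_bits_low(0, block, after, mtr);
	}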

ibuf_update_free_bits_zip(): New function based on ibuf_update_free_bits_low(),
for use on compressed pages.  Remove the parameter max_insert_size that
was used for computing the before image of the free bits.  Always update the
bitmap.
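Call sites on compressed pages change along these lines (sketch; the exact
hunks appear in the diff below):

	/* before: pass zip_size and a guessed before-image insert size */
	ibuf_update_free_bits_low(buf_block_get_zip_size(block),
				  block, UNIV_PAGE_SIZE, mtr);

	/* after: recompute the bits from the page and always write them */
	ibuf_update_free_bits_zip(block, mtr);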

ibuf_index_page_calc_free_zip(): New function, factored out from
ibuf_index_page_calc_free().
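After the split, ibuf_index_page_calc_free() simply dispatches on the page
type, in outline (see the corresponding hunk below):

	if (!zip_size) {
		/* uncompressed: derive the bits from the maximum
		insert size after reorganization */
		return(ibuf_index_page_calc_free_bits(0, max_ins_size));
	} else {
		/* compressed: also consult page_zip_max_ins_size() */
		return(ibuf_index_page_calc_free_zip(zip_size, block));
	}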

ibuf_update_free_bits_if_full(): Document that this function must only be
invoked on uncompressed pages.  Add a debug assertion about this.
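The added assertion amounts to (see the last hunk below):

	ut_ad(!buf_block_get_page_zip(block));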
parent 468df81b
@@ -1014,7 +1014,6 @@ btr_cur_optimistic_insert(
 	buf_block_t*	block;
 	page_t*		page;
 	ulint		max_size;
-	ulint		max_size_zip	= 0;
 	rec_t*		dummy_rec;
 	ibool		leaf;
 	ibool		reorg;
@@ -1046,19 +1045,6 @@ btr_cur_optimistic_insert(
 	max_size = page_get_max_insert_size_after_reorganize(page, 1);
 	leaf = page_is_leaf(page);
 
-	/* If necessary for updating the insert buffer bitmap,
-	calculate the current maximum insert size on a compressed page. */
-	if (zip_size && UNIV_LIKELY(leaf) && !dict_index_is_clust(index)) {
-		const page_zip_des_t*	page_zip
-			= buf_block_get_page_zip(block);
-		lint	zip_max
-			= page_zip_max_ins_size(page_zip, FALSE);
-
-		if (zip_max >= 0 && max_size > (ulint) zip_max) {
-			max_size_zip = (ulint) zip_max;
-		}
-	}
-
 	/* Calculate the record size when entry is converted to a record */
 	rec_size = rec_get_converted_size(index, entry, n_ext);
 
@@ -1206,8 +1192,7 @@ fail_err:
 		if (zip_size) {
 			/* Update the bits in the same mini-transaction. */
-			ibuf_update_free_bits_low(zip_size, block,
-						  max_size_zip, mtr);
+			ibuf_update_free_bits_zip(block, mtr);
 		} else {
 			/* Decrement the bits in a separate
 			mini-transaction. */
...@@ -1752,8 +1737,7 @@ btr_cur_update_in_place( ...@@ -1752,8 +1737,7 @@ btr_cur_update_in_place(
if (page_zip && !dict_index_is_clust(index) if (page_zip && !dict_index_is_clust(index)
&& page_is_leaf(buf_block_get_frame(block))) { && page_is_leaf(buf_block_get_frame(block))) {
/* Update the free bits in the insert buffer. */ /* Update the free bits in the insert buffer. */
ibuf_update_free_bits_low(buf_block_get_zip_size(block), ibuf_update_free_bits_zip(block, mtr);
block, UNIV_PAGE_SIZE, mtr);
} }
btr_cur_update_in_place_log(flags, rec, index, update, btr_cur_update_in_place_log(flags, rec, index, update,
@@ -1956,8 +1940,7 @@ err_exit:
 	if (page_zip && !dict_index_is_clust(index)
 	    && page_is_leaf(page)) {
 		/* Update the free bits in the insert buffer. */
-		ibuf_update_free_bits_low(buf_block_get_zip_size(block), block,
-					  UNIV_PAGE_SIZE, mtr);
+		ibuf_update_free_bits_zip(block, mtr);
 	}
 
 	if (!rec_get_deleted_flag(rec, page_is_comp(page))) {
@@ -2224,9 +2207,7 @@ btr_cur_pessimistic_update(
 		if (page_zip && !dict_index_is_clust(index)
 		    && page_is_leaf(page)) {
 			/* Update the free bits in the insert buffer. */
-			ibuf_update_free_bits_low(
-				buf_block_get_zip_size(block), block,
-				UNIV_PAGE_SIZE, mtr);
+			ibuf_update_free_bits_zip(block, mtr);
 		}
 
 		err = DB_SUCCESS;
@@ -2708,7 +2689,6 @@ btr_cur_optimistic_delete(
 	mtr_t*		mtr)	/* in: mtr */
 {
 	buf_block_t*	block;
-	ulint		max_ins_size;
 	rec_t*		rec;
 	mem_heap_t*	heap		= NULL;
 	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
@@ -2736,24 +2716,15 @@ btr_cur_optimistic_delete(
 		page_t*		page	= buf_block_get_frame(block);
 		page_zip_des_t*	page_zip= buf_block_get_page_zip(block);
-		ulint		zip_size= buf_block_get_zip_size(block);
+		ulint		max_ins	= 0;
 
 		lock_update_delete(block, rec);
 		btr_search_update_hash_on_delete(cursor);
 
-		max_ins_size = page_get_max_insert_size_after_reorganize(
-			page, 1);
-		if (zip_size) {
-			lint	zip_max_ins = page_zip_max_ins_size(
-				page_zip, FALSE/* not clustered */);
-
-			if (UNIV_UNLIKELY(zip_max_ins < 0)) {
-				max_ins_size = 0;
-			} else if (UNIV_LIKELY
-				   (max_ins_size > (ulint) zip_max_ins)) {
-				max_ins_size = (ulint) zip_max_ins;
-			}
+		if (!page_zip) {
+			max_ins = page_get_max_insert_size_after_reorganize(
+				page, 1);
 		}
 
 #ifdef UNIV_ZIP_DEBUG
 		ut_a(!page_zip || page_zip_validate(page_zip, page));
@@ -2764,10 +2735,15 @@ btr_cur_optimistic_delete(
 		ut_a(!page_zip || page_zip_validate(page_zip, page));
 #endif /* UNIV_ZIP_DEBUG */
 
-		if (!dict_index_is_clust(cursor->index)
-		    && page_is_leaf(page)) {
-			ibuf_update_free_bits_low(zip_size, block,
-						  max_ins_size, mtr);
+		if (dict_index_is_clust(cursor->index)
+		    || !page_is_leaf(page)) {
+			/* The insert buffer does not handle
+			inserts to clustered indexes or to non-leaf
+			pages of secondary index B-trees. */
+		} else if (page_zip) {
+			ibuf_update_free_bits_zip(block, mtr);
+		} else {
+			ibuf_update_free_bits_low(block, max_ins, mtr);
 		}
 	}
...
@@ -792,11 +792,11 @@ ibuf_set_free_bits_low(
 /*===================*/
 	ulint		zip_size,/* in: compressed page size in bytes;
				0 for uncompressed pages */
-	buf_block_t*	block,	/* in: index page; free bits are set if
+	const buf_block_t* block,	/* in: index page; free bits are set if
				the index is non-clustered and page
				level is 0 */
 	ulint		val,	/* in: value to set: < 4 */
-	mtr_t*		mtr)	/* in: mtr */
+	mtr_t*		mtr)	/* in/out: mtr */
 {
 	page_t*	bitmap_page;
 	ulint	space;
@@ -911,33 +911,70 @@ ibuf_reset_free_bits(
 }
 
 /**************************************************************************
-Updates the free bits for a page to reflect the present state. Does this
-in the mtr given, which means that the latching order rules virtually prevent
-any further operations for this OS thread until mtr is committed. */
+Updates the free bits for an uncompressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
+prevent any further operations for this OS thread until mtr is committed. */
 
 void
 ibuf_update_free_bits_low(
 /*======================*/
-	ulint		zip_size,	/* in: compressed page size in bytes;
-					0 for uncompressed pages */
-	buf_block_t*	block,		/* in: index page */
-	ulint		max_ins_size,	/* in: value of maximum insert size
-					with reorganize before the latest
-					operation performed to the page */
-	mtr_t*		mtr)		/* in: mtr */
+	const buf_block_t*	block,		/* in: index page */
+	ulint			max_ins_size,	/* in: value of
+						maximum insert size
+						with reorganize before
+						the latest operation
+						performed to the page */
+	mtr_t*			mtr)		/* in/out: mtr */
 {
 	ulint	before;
 	ulint	after;
 
-	before = ibuf_index_page_calc_free_bits(zip_size, max_ins_size);
+	ut_a(!buf_block_get_page_zip(block));
 
-	after = ibuf_index_page_calc_free(zip_size, block);
+	before = ibuf_index_page_calc_free_bits(0, max_ins_size);
 
+	after = ibuf_index_page_calc_free(0, block);
+
+	/* This approach cannot be used on compressed pages, since the
+	computed value of "before" often does not match the current
+	state of the bitmap.  This is because the free space may
+	increase or decrease when a compressed page is reorganized. */
 	if (before != after) {
-		ibuf_set_free_bits_low(zip_size, block, after, mtr);
+		ibuf_set_free_bits_low(0, block, after, mtr);
 	}
 }
 
+/**************************************************************************
+Updates the free bits for a compressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
+prevent any further operations for this OS thread until mtr is committed. */
+
+void
+ibuf_update_free_bits_zip(
+/*======================*/
+	const buf_block_t*	block,	/* in: index page */
+	mtr_t*			mtr)	/* in/out: mtr */
+{
+	page_t*	bitmap_page;
+	ulint	space;
+	ulint	page_no;
+	ulint	zip_size;
+	ulint	after;
+
+	space = buf_block_get_space(block);
+	page_no = buf_block_get_page_no(block);
+	zip_size = buf_block_get_zip_size(block);
+
+	ut_a(page_is_leaf(buf_block_get_frame(block)));
+	ut_a(zip_size);
+
+	bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr);
+
+	after = ibuf_index_page_calc_free_zip(zip_size, block);
+
+	ibuf_bitmap_page_set_bits(bitmap_page, page_no, zip_size,
+				  IBUF_BITMAP_FREE, after, mtr);
+}
+
 /**************************************************************************
 Updates the free bits for the two pages to reflect the present state. Does
 this in the mtr given, which means that the latching order rules virtually
...
@@ -67,10 +67,11 @@ ibuf_reset_free_bits(
 				if the index is a non-clustered
 				non-unique, and page level is 0 */
 /****************************************************************************
-Updates the free bits of the page in the ibuf bitmap if there is not enough
-free on the page any more. This is done in a separate mini-transaction, hence
-this operation does not restrict further work to only ibuf bitmap operations,
-which would result if the latch to the bitmap page were kept. */
+Updates the free bits of an uncompressed page in the ibuf bitmap if
+there is not enough free on the page any more. This is done in a
+separate mini-transaction, hence this operation does not restrict
+further work to only ibuf bitmap operations, which would result if the
+latch to the bitmap page were kept. */
 UNIV_INLINE
 void
 ibuf_update_free_bits_if_full(
@@ -87,20 +88,30 @@ ibuf_update_free_bits_if_full(
 				used in the latest operation, if known, or
 				ULINT_UNDEFINED */
 /**************************************************************************
-Updates the free bits for the page to reflect the present state. Does this
-in the mtr given, which means that the latching order rules virtually
+Updates the free bits for an uncompressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
 prevent any further operations for this OS thread until mtr is committed. */
 void
 ibuf_update_free_bits_low(
 /*======================*/
-	ulint		zip_size,	/* in: compressed page size in bytes;
-					0 for uncompressed pages */
-	buf_block_t*	block,		/* in: index page */
-	ulint		max_ins_size,	/* in: value of maximum insert size
-					with reorganize before the latest
-					operation performed to the page */
-	mtr_t*		mtr);		/* in: mtr */
+	const buf_block_t*	block,		/* in: index page */
+	ulint			max_ins_size,	/* in: value of
+						maximum insert size
+						with reorganize before
+						the latest operation
+						performed to the page */
+	mtr_t*			mtr);		/* in/out: mtr */
+/**************************************************************************
+Updates the free bits for a compressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
+prevent any further operations for this OS thread until mtr is committed. */
+void
+ibuf_update_free_bits_zip(
+/*======================*/
+	const buf_block_t*	block,	/* in: index page */
+	mtr_t*			mtr);	/* in/out: mtr */
 /**************************************************************************
 Updates the free bits for the two pages to reflect the present state. Does
 this in the mtr given, which means that the latching order rules virtually
...
@@ -193,48 +193,70 @@ ibuf_index_page_calc_free_from_bits(
 }
 
 /*************************************************************************
-Translates the free space on a page to a value in the ibuf bitmap.*/
+Translates the free space on a compressed page to a value in the ibuf bitmap.*/
 UNIV_INLINE
 ulint
-ibuf_index_page_calc_free(
-/*======================*/
+ibuf_index_page_calc_free_zip(
+/*==========================*/
 					/* out: value for ibuf bitmap bits */
-	ulint			zip_size,/* in: compressed page size in bytes;
-					0 for uncompressed pages */
+	ulint			zip_size,
+					/* in: compressed page size in bytes */
 	const buf_block_t*	block)	/* in: buffer block */
 {
-	ulint	max_ins_size;
+	ulint			max_ins_size;
+	const page_zip_des_t*	page_zip;
+	lint			zip_max_ins;
 
 	ut_ad(zip_size == buf_block_get_zip_size(block));
+	ut_ad(zip_size);
 
 	max_ins_size = page_get_max_insert_size_after_reorganize(
 		buf_block_get_frame(block), 1);
 
-	if (!zip_size) {
-		return(ibuf_index_page_calc_free_bits(0, max_ins_size));
-	} else {
-		const page_zip_des_t*	page_zip;
-		lint			zip_max_ins;
-
-		page_zip = buf_block_get_page_zip(block);
-		zip_max_ins = page_zip_max_ins_size(page_zip,
-						    FALSE/* not clustered */);
-
-		if (UNIV_UNLIKELY(zip_max_ins < 0)) {
-			max_ins_size = 0;
-		} else if (UNIV_LIKELY(max_ins_size > (ulint) zip_max_ins)) {
-			max_ins_size = (ulint) zip_max_ins;
-		}
-
-		return(ibuf_index_page_calc_free_bits(zip_size, max_ins_size));
+	page_zip = buf_block_get_page_zip(block);
+	zip_max_ins = page_zip_max_ins_size(page_zip,
+					    FALSE/* not clustered */);
+
+	if (UNIV_UNLIKELY(zip_max_ins < 0)) {
+		return(0);
+	} else if (UNIV_LIKELY(max_ins_size > (ulint) zip_max_ins)) {
+		max_ins_size = (ulint) zip_max_ins;
+	}
+
+	return(ibuf_index_page_calc_free_bits(zip_size, max_ins_size));
+}
+
+/*************************************************************************
+Translates the free space on a page to a value in the ibuf bitmap.*/
+UNIV_INLINE
+ulint
+ibuf_index_page_calc_free(
+/*======================*/
+					/* out: value for ibuf bitmap bits */
+	ulint			zip_size,/* in: compressed page size in bytes;
+					0 for uncompressed pages */
+	const buf_block_t*	block)	/* in: buffer block */
+{
+	ut_ad(zip_size == buf_block_get_zip_size(block));
+
+	if (!zip_size) {
+		ulint	max_ins_size;
+
+		max_ins_size = page_get_max_insert_size_after_reorganize(
+			buf_block_get_frame(block), 1);
+
+		return(ibuf_index_page_calc_free_bits(0, max_ins_size));
+	} else {
+		return(ibuf_index_page_calc_free_zip(zip_size, block));
 	}
 }
 
 /****************************************************************************
-Updates the free bits of the page in the ibuf bitmap if there is not enough
-free on the page any more. This is done in a separate mini-transaction, hence
-this operation does not restrict further work to only ibuf bitmap operations,
-which would result if the latch to the bitmap page were kept. */
+Updates the free bits of an uncompressed page in the ibuf bitmap if
+there is not enough free on the page any more. This is done in a
+separate mini-transaction, hence this operation does not restrict
+further work to only ibuf bitmap operations, which would result if the
+latch to the bitmap page were kept. */
 UNIV_INLINE
 void
 ibuf_update_free_bits_if_full(
@@ -254,6 +276,8 @@ ibuf_update_free_bits_if_full(
 	ulint	before;
 	ulint	after;
 
+	ut_ad(!buf_block_get_page_zip(block));
+
 	before = ibuf_index_page_calc_free_bits(0, max_ins_size);
 
 	if (max_ins_size >= increase) {
...