Commit 9f4aae8b authored by Marko Mäkelä

Bug#12948130 UNNECESSARY X-LOCKING OF ADAPTIVE HASH INDEX (BTR_SEARCH_LATCH)

InnoDB acquires an x-latch on btr_search_latch for certain in-place updates
that do not affect the adaptive hash index. These operations do not really
need to be protected by the btr_search_latch:

* updating DB_TRX_ID
* updating DB_ROLL_PTR
* updating PAGE_MAX_TRX_ID
* updating the delete-mark flag

rb:750 approved by Sunny Bains
parent 928b12e5
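The reasoning behind the fix can be illustrated with a small stand-alone model: the adaptive hash index maps values of the indexed key fields to record pointers, so rewriting DB_TRX_ID, DB_ROLL_PTR, PAGE_MAX_TRX_ID or the delete-mark bit in place changes neither what the hash is keyed on nor where its pointers lead. The sketch below is hypothetical C, not InnoDB code; every name in it (record_t, ahi_insert, ahi_lookup, ...) is invented for illustration.

/* Hypothetical model, not InnoDB code: all names are invented. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	int		key;		/* user key field: the only thing hashed */
	uint64_t	trx_id;		/* models DB_TRX_ID, never hashed */
	uint64_t	roll_ptr;	/* models DB_ROLL_PTR, never hashed */
	bool		delete_mark;	/* models the info-bit flag, never hashed */
} record_t;

#define N_BUCKETS 8
static record_t* hash_index[N_BUCKETS];	/* bucket -> cached record pointer */

static unsigned bucket_of(int key) { return (unsigned) key % N_BUCKETS; }

static void ahi_insert(record_t* rec)
{
	hash_index[bucket_of(rec->key)] = rec;
}

static record_t* ahi_lookup(int key)
{
	record_t*	rec = hash_index[bucket_of(key)];
	return (rec && rec->key == key) ? rec : NULL;
}

int main(void)
{
	record_t	rec = { 7, 1, 1, false };

	ahi_insert(&rec);

	/* The in-place updates listed above touch nothing that the
	hash is keyed on, so the cached pointer stays valid. */
	rec.trx_id = 2;
	rec.roll_ptr = 2;
	rec.delete_mark = true;

	printf("lookup(7): %s\n", ahi_lookup(7) == &rec ? "still valid" : "broken");
	return 0;
}

What still needs the latch is an update that can change a hashed key field, which is why btr_cur_update_in_place() keeps the x-lock around row_upd_rec_in_place() in the patch below.
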
ChangeLog

+2011-09-08	The InnoDB Team
+
+	* btr/btr0cur.c, include/page0page.h, include/row0upd.ic:
+	Fix Bug#12948130 UNNECESSARY X-LOCKING OF ADAPTIVE HASH INDEX
+
 2011-09-06	The InnoDB Team
 
 	* buf/buf0buddy.c:
btr/btr0cur.c

@@ -1727,6 +1727,7 @@ btr_cur_update_in_place(
 	roll_ptr_t	roll_ptr	= ut_dulint_zero;
 	trx_t*		trx;
 	ulint		was_delete_marked;
+	ibool		is_hashed;
 	mem_heap_t*	heap		= NULL;
 	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
 	ulint*		offsets		= offsets_;
@@ -1768,7 +1769,21 @@ btr_cur_update_in_place(
 		return(err);
 	}
 
-	if (block->is_hashed) {
+	if (!(flags & BTR_KEEP_SYS_FLAG)) {
+		row_upd_rec_sys_fields(rec, NULL,
+				       index, offsets, trx, roll_ptr);
+	}
+
+	was_delete_marked = rec_get_deleted_flag(
+		rec, page_is_comp(buf_block_get_frame(block)));
+
+	is_hashed = block->is_hashed;
+
+	if (is_hashed) {
+		/* TO DO: Can we skip this if none of the fields
+		index->search_info->curr_n_fields
+		are being updated? */
+
 		/* The function row_upd_changes_ord_field_binary works only
 		if the update vector was built for a clustered index, we must
 		NOT call it if index is secondary */
@@ -1784,17 +1799,9 @@ btr_cur_update_in_place(
 		rw_lock_x_lock(&btr_search_latch);
 	}
 
-	if (!(flags & BTR_KEEP_SYS_FLAG)) {
-		row_upd_rec_sys_fields(rec, NULL,
-				       index, offsets, trx, roll_ptr);
-	}
-
-	was_delete_marked = rec_get_deleted_flag(
-		rec, page_is_comp(buf_block_get_frame(block)));
-
 	row_upd_rec_in_place(rec, index, offsets, update, page_zip);
 
-	if (block->is_hashed) {
+	if (is_hashed) {
 		rw_lock_x_unlock(&btr_search_latch);
 	}
 
@@ -2520,7 +2527,8 @@ btr_cur_parse_del_mark_set_clust_rec(
 		/* We do not need to reserve btr_search_latch, as the page
 		is only being recovered, and there cannot be a hash index to
-		it. */
+		it. Besides, these fields are being updated in place
+		and the adaptive hash index does not depend on them. */
 
 		btr_rec_set_deleted_flag(rec, page_zip, val);
@@ -2600,9 +2608,9 @@ btr_cur_del_mark_set_clust_rec(
 		return(err);
 	}
 
-	if (block->is_hashed) {
-		rw_lock_x_lock(&btr_search_latch);
-	}
+	/* The btr_search_latch is not needed here, because
+	the adaptive hash index does not depend on the delete-mark
+	and the delete-mark is being updated in place. */
 
 	page_zip = buf_block_get_page_zip(block);
 
@@ -2616,10 +2624,6 @@ btr_cur_del_mark_set_clust_rec(
 					       index, offsets, trx, roll_ptr);
 	}
 
-	if (block->is_hashed) {
-		rw_lock_x_unlock(&btr_search_latch);
-	}
-
 	btr_cur_del_mark_set_clust_rec_log(flags, rec, index, val, trx,
 					   roll_ptr, mtr);
 
@@ -2695,7 +2699,8 @@ btr_cur_parse_del_mark_set_sec_rec(
 		/* We do not need to reserve btr_search_latch, as the page
 		is only being recovered, and there cannot be a hash index to
-		it. */
+		it. Besides, the delete-mark flag is being updated in place
+		and the adaptive hash index does not depend on it. */
 
 		btr_rec_set_deleted_flag(rec, page_zip, val);
 	}
@@ -2743,16 +2748,11 @@ btr_cur_del_mark_set_sec_rec(
 	ut_ad(!!page_rec_is_comp(rec)
 	      == dict_table_is_comp(cursor->index->table));
 
-	if (block->is_hashed) {
-		rw_lock_x_lock(&btr_search_latch);
-	}
-
+	/* We do not need to reserve btr_search_latch, as the
+	delete-mark flag is being updated in place and the adaptive
+	hash index does not depend on it. */
 	btr_rec_set_deleted_flag(rec, buf_block_get_page_zip(block), val);
 
-	if (block->is_hashed) {
-		rw_lock_x_unlock(&btr_search_latch);
-	}
-
 	btr_cur_del_mark_set_sec_rec_log(rec, val, mtr);
 
 	return(DB_SUCCESS);
@@ -2772,8 +2772,11 @@ btr_cur_del_unmark_for_ibuf(
 			uncompressed */
 	mtr_t*		mtr)	/*!< in: mtr */
 {
-	/* We do not need to reserve btr_search_latch, as the page has just
-	been read to the buffer pool and there cannot be a hash index to it. */
+	/* We do not need to reserve btr_search_latch, as the page
+	has just been read to the buffer pool and there cannot be
+	a hash index to it. Besides, the delete-mark flag is being
+	updated in place and the adaptive hash index does not depend
+	on it. */
 
 	btr_rec_set_deleted_flag(rec, page_zip, FALSE);
 
include/page0page.h

@@ -68,10 +68,7 @@ typedef byte page_header_t;
 #define PAGE_MAX_TRX_ID	 18	/* highest id of a trx which may have modified
 				a record on the page; a dulint; defined only
 				in secondary indexes and in the insert buffer
-				tree; NOTE: this may be modified only
-				when the thread has an x-latch to the page,
-				and ALSO an x-latch to btr_search_latch
-				if there is a hash index to the page! */
+				tree */
 #define PAGE_HEADER_PRIV_END 26	/* end of private data structure of the page
 				header which are set in a page create */
 /*----*/
include/row0upd.ic

@@ -157,11 +157,6 @@ row_upd_rec_sys_fields(
 {
 	ut_ad(dict_index_is_clust(index));
 	ut_ad(rec_offs_validate(rec, index, offsets));
-#ifdef UNIV_SYNC_DEBUG
-	if (!rw_lock_own(&btr_search_latch, RW_LOCK_EX)) {
-		ut_ad(!buf_block_align(rec)->is_hashed);
-	}
-#endif /* UNIV_SYNC_DEBUG */
 
 	if (UNIV_LIKELY_NULL(page_zip)) {
 		ulint	pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
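
For completeness, here is a hedged sketch of the latching pattern before and after the patch, modeled with a POSIX rwlock instead of InnoDB's rw_lock_t. All function, type, and parameter names are invented; the real btr_cur_update_in_place() has many more moving parts.

/* Hypothetical model of the latching change, not InnoDB code. */
#include <pthread.h>
#include <stdbool.h>

typedef struct {
	int		key;		/* covered by the hash index */
	unsigned long	trx_id;		/* system field, not covered */
} rec_t;

static pthread_rwlock_t	search_latch = PTHREAD_RWLOCK_INITIALIZER;

/* Before: every in-place update on a hashed block took the x-latch,
even when only system fields (which are never hashed) were written. */
static void update_sys_fields_before(rec_t* rec, bool block_is_hashed,
				     unsigned long trx_id)
{
	if (block_is_hashed) {
		pthread_rwlock_wrlock(&search_latch);
	}

	rec->trx_id = trx_id;

	if (block_is_hashed) {
		pthread_rwlock_unlock(&search_latch);
	}
}

/* After: system fields are written without the latch; only the part
of the update that may change hashed fields is still serialized
against hash readers, and the hashed flag is read once up front. */
static void update_in_place_after(rec_t* rec, bool block_is_hashed,
				  unsigned long trx_id,
				  bool changes_hashed_fields, int new_key)
{
	rec->trx_id = trx_id;			/* no latch needed */

	if (changes_hashed_fields) {
		bool	is_hashed = block_is_hashed;

		if (is_hashed) {
			pthread_rwlock_wrlock(&search_latch);
		}

		rec->key = new_key;		/* what hash readers depend on */

		if (is_hashed) {
			pthread_rwlock_unlock(&search_latch);
		}
	}
}

int main(void)
{
	rec_t	rec = { 42, 0 };

	update_sys_fields_before(&rec, true, 100);
	update_in_place_after(&rec, true, 101, false, 0);
	return 0;
}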