Commit 2e64513f authored by Marko Mäkelä

MDEV-20612 preparation: Fewer calls to buf_page_t::id()

parent b19ec884
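
The recurring pattern in this commit: evaluate buf_page_t::id() once per function, cache the result in a local page_id_t, and pass that identifier (rather than a whole buf_block_t*) to lock-system functions that only need to name a page. A minimal sketch of the idea, using simplified stand-in types instead of the real definitions from the InnoDB headers:

    /* Sketch only: simplified stand-ins for the real InnoDB types. */
    #include <cstdint>
    #include <cstdio>

    struct page_id_t
    {
      uint32_t m_space, m_page_no;
      uint32_t space() const { return m_space; }
      uint32_t page_no() const { return m_page_no; }
    };
    struct buf_page_t { page_id_t id_; page_id_t id() const { return id_; } };
    struct buf_block_t { buf_page_t page; };

    /* After this commit, helpers that only need the page identity take
    a page_id_t by value; it is small and trivially copyable. */
    static void lock_helper(const page_id_t id)
    {
      std::printf("page [%u:%u]\n", id.space(), id.page_no());
    }

    int main()
    {
      buf_block_t block{{{4, 7}}};
      const page_id_t id{block.page.id()};  /* one id() call, reused below */
      lock_helper(id);
      lock_helper(id);
    }
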
......@@ -1873,6 +1873,8 @@ btr_root_raise_and_insert(
ut_a(!root_page_zip || page_zip_validate(root_page_zip, root->frame,
index));
#endif /* UNIV_ZIP_DEBUG */
const page_id_t root_id{root->page.id()};
#ifdef UNIV_BTR_DEBUG
if (!dict_index_is_ibuf(index)) {
ulint space = index->table->space_id;
......@@ -1883,7 +1885,7 @@ btr_root_raise_and_insert(
+ root->frame, space));
}
ut_a(dict_index_get_page(index) == root->page.id().page_no());
ut_a(dict_index_get_page(index) == root_id.page_no());
#endif /* UNIV_BTR_DEBUG */
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
......@@ -1941,7 +1943,7 @@ btr_root_raise_and_insert(
/* Move any existing predicate locks */
if (dict_index_is_spatial(index)) {
lock_prdt_rec_move(new_block, root);
lock_prdt_rec_move(new_block, root_id);
} else {
btr_search_move_or_delete_hash_entries(
new_block, root);
......@@ -1986,7 +1988,7 @@ btr_root_raise_and_insert(
root page: we cannot discard the lock structs on the root page */
if (!dict_table_is_locking_disabled(index->table)) {
lock_update_root_raise(new_block, root);
lock_update_root_raise(*new_block, root_id);
}
/* Create a memory heap where the node pointer is stored */
......@@ -3342,7 +3344,7 @@ btr_lift_page_up(
/* Also update the predicate locks */
if (dict_index_is_spatial(index)) {
lock_prdt_rec_move(father_block, block);
lock_prdt_rec_move(father_block, block->page.id());
} else {
btr_search_move_or_delete_hash_entries(
father_block, block);
......@@ -3350,14 +3352,14 @@ btr_lift_page_up(
}
if (!dict_table_is_locking_disabled(index->table)) {
const page_id_t id{block->page.id()};
/* Free predicate page locks on the block */
if (dict_index_is_spatial(index)) {
lock_sys.mutex_lock();
if (index->is_spatial()) {
LockMutexGuard g;
lock_prdt_page_free_from_discard(
block, &lock_sys.prdt_page_hash);
lock_sys.mutex_unlock();
id, &lock_sys.prdt_page_hash);
}
lock_update_copy_and_discard(father_block, block);
lock_update_copy_and_discard(*father_block, id);
}
/* Go upward to root page, decrementing levels by one. */
......@@ -3576,6 +3578,8 @@ btr_compress(
/* Remove the page from the level list */
btr_level_list_remove(*block, *index, mtr);
const page_id_t id{block->page.id()};
if (dict_index_is_spatial(index)) {
rec_t* my_rec = father_cursor.page_cur.rec;
......@@ -3605,16 +3609,15 @@ btr_compress(
}
/* No GAP lock needs to be worried about */
lock_sys.mutex_lock();
LockMutexGuard g;
lock_prdt_page_free_from_discard(
block, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(block);
lock_sys.mutex_unlock();
id, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(id);
} else {
btr_cur_node_ptr_delete(&father_cursor, mtr);
if (!dict_table_is_locking_disabled(index->table)) {
lock_update_merge_left(
merge_block, orig_pred, block);
*merge_block, orig_pred, id);
}
}
......@@ -3758,11 +3761,11 @@ btr_compress(
offsets2, offsets,
merge_page, mtr);
}
lock_sys.mutex_lock();
const page_id_t id{block->page.id()};
LockMutexGuard g;
lock_prdt_page_free_from_discard(
block, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(block);
lock_sys.mutex_unlock();
id, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(id);
} else {
compressed = btr_cur_pessimistic_delete(&err, TRUE,
......
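
Several hunks above and below replace paired lock_sys.mutex_lock()/lock_sys.mutex_unlock() calls with a block-scoped LockMutexGuard g;. The real guard ships with the lock subsystem; what it amounts to is the standard RAII idiom, sketched here against a stand-in lock_sys singleton:

    /* Sketch of the RAII idiom behind LockMutexGuard; the real class is
    declared in the InnoDB lock subsystem headers. */
    struct lock_sys_t { void mutex_lock() {} void mutex_unlock() {} } lock_sys;

    struct LockMutexGuard
    {
      LockMutexGuard() { lock_sys.mutex_lock(); }
      ~LockMutexGuard() { lock_sys.mutex_unlock(); }
      LockMutexGuard(const LockMutexGuard&) = delete;
      LockMutexGuard &operator=(const LockMutexGuard&) = delete;
    };

    int main()
    {
      LockMutexGuard g;  /* mutex acquired here */
      /* ... critical section ... */
    }                    /* released on every path out of the scope */

Because the destructor runs on every exit path, the rewritten code can drop the explicit mutex_unlock() calls that previously had to precede each early return.
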
......@@ -3311,15 +3311,15 @@ static void btr_cur_prefetch_siblings(const buf_block_t *block,
uint32_t prev= mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_PREV));
uint32_t next= mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_NEXT));
fil_space_t *space= index->table->space;
if (prev == FIL_NULL);
else if (index->table->space->acquire())
buf_read_page_background(index->table->space,
page_id_t(block->page.id().space(), prev),
else if (space->acquire())
buf_read_page_background(space, page_id_t(space->id, prev),
block->zip_size(), false);
if (next == FIL_NULL);
else if (index->table->space->acquire())
buf_read_page_background(index->table->space,
page_id_t(block->page.id().space(), next),
else if (space->acquire())
buf_read_page_background(space, page_id_t(space->id, next),
block->zip_size(), false);
}
......@@ -3859,7 +3859,7 @@ btr_cur_upd_lock_and_undo(
if (!(flags & BTR_NO_LOCKING_FLAG)) {
err = lock_clust_rec_modify_check_and_lock(
flags, btr_cur_get_block(cursor), rec, index,
btr_cur_get_block(cursor), rec, index,
offsets, thr);
if (err != DB_SUCCESS) {
return(err);
......@@ -4751,7 +4751,8 @@ btr_cur_optimistic_update(
btr_page_reorganize(page_cursor, index, mtr);
} else if (!dict_table_is_locking_disabled(index->table)) {
/* Restore the old explicit lock state on the record */
lock_rec_restore_from_page_infimum(block, rec, block);
lock_rec_restore_from_page_infimum(*block, rec,
block->page.id());
}
page_cur_move_to_next(page_cursor);
......@@ -4805,10 +4806,11 @@ btr_cur_pess_upd_restore_supremum(
const uint32_t prev_page_no = btr_page_get_prev(page);
const page_id_t page_id(block->page.id().space(), prev_page_no);
const page_id_t block_id{block->page.id()};
const page_id_t prev_id(block_id.space(), prev_page_no);
ut_ad(prev_page_no != FIL_NULL);
prev_block = buf_page_get_with_no_latch(page_id, block->zip_size(),
prev_block = buf_page_get_with_no_latch(prev_id, block->zip_size(),
mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(btr_page_get_next(prev_block->frame)
......@@ -4818,7 +4820,7 @@ btr_cur_pess_upd_restore_supremum(
/* We must already have an x-latch on prev_block! */
ut_ad(mtr->memo_contains_flagged(prev_block, MTR_MEMO_PAGE_X_FIX));
lock_rec_reset_and_inherit_gap_locks(prev_block, block,
lock_rec_reset_and_inherit_gap_locks(*prev_block, block_id,
PAGE_HEAP_NO_SUPREMUM,
page_rec_get_heap_no(rec));
}
......@@ -5106,7 +5108,8 @@ btr_cur_pessimistic_update(
}
} else if (!dict_table_is_locking_disabled(index->table)) {
lock_rec_restore_from_page_infimum(
btr_cur_get_block(cursor), rec, block);
*btr_cur_get_block(cursor), rec,
block->page.id());
}
if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))
......@@ -5251,7 +5254,7 @@ btr_cur_pessimistic_update(
rec = page_cursor->rec;
} else if (!dict_table_is_locking_disabled(index->table)) {
lock_rec_restore_from_page_infimum(
btr_cur_get_block(cursor), rec, block);
*btr_cur_get_block(cursor), rec, block->page.id());
}
/* If necessary, restore also the correct lock state for a new,
......@@ -5351,14 +5354,6 @@ btr_cur_del_mark_set_clust_rec(
return(DB_SUCCESS);
}
err = lock_clust_rec_modify_check_and_lock(BTR_NO_LOCKING_FLAG, block,
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
return(err);
}
err = trx_undo_report_row_operation(thr, index,
entry, NULL, 0, rec, offsets,
&roll_ptr);
......
......@@ -397,8 +397,8 @@ btr_defragment_merge_pages(
if (n_recs_to_move == n_recs) {
/* The whole page is merged with the previous page,
free it. */
lock_update_merge_left(to_block, orig_pred,
from_block);
const page_id_t from{from_block->page.id()};
lock_update_merge_left(*to_block, orig_pred, from);
btr_search_drop_page_hash_index(from_block);
btr_level_list_remove(*from_block, *index, mtr);
btr_page_get_father(index, from_block, mtr, &parent);
......
/*****************************************************************************
Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2020, MariaDB Corporation.
Copyright (c) 2018, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -769,7 +769,7 @@ rtr_split_page_move_rec_list(
ut_a(rec);
lock_rec_restore_from_page_infimum(
new_block, rec, block);
*new_block, rec, block->page.id());
page_cur_move_to_next(&new_page_cursor);
......
/*****************************************************************************
Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -1164,7 +1164,7 @@ rtr_check_discard_page(
the root page */
buf_block_t* block) /*!< in: block of page to be discarded */
{
const ulint pageno = block->page.id().page_no();
const page_id_t id{block->page.id()};
mysql_mutex_lock(&index->rtr_track->rtr_active_mutex);
......@@ -1175,8 +1175,8 @@ rtr_check_discard_page(
mysql_mutex_lock(&rtr_info->rtr_path_mutex);
for (const node_visit_t& node : *rtr_info->path) {
if (node.page_no == pageno) {
rtr_rebuild_path(rtr_info, pageno);
if (node.page_no == id.page_no()) {
rtr_rebuild_path(rtr_info, node.page_no);
break;
}
}
......@@ -1185,8 +1185,7 @@ rtr_check_discard_page(
if (rtr_info->matches) {
mysql_mutex_lock(&rtr_info->matches->rtr_match_mutex);
if ((&rtr_info->matches->block)->page.id().page_no()
== pageno) {
if ((&rtr_info->matches->block)->page.id() == id) {
if (!rtr_info->matches->matched_recs->empty()) {
rtr_info->matches->matched_recs->clear();
}
......@@ -1200,10 +1199,9 @@ rtr_check_discard_page(
mysql_mutex_unlock(&index->rtr_track->rtr_active_mutex);
lock_sys.mutex_lock();
lock_prdt_page_free_from_discard(block, &lock_sys.prdt_hash);
lock_prdt_page_free_from_discard(block, &lock_sys.prdt_page_hash);
lock_sys.mutex_unlock();
LockMutexGuard g;
lock_prdt_page_free_from_discard(id, &lock_sys.prdt_hash);
lock_prdt_page_free_from_discard(id, &lock_sys.prdt_page_hash);
}
/** Structure acts as functor to get the optimistic access of the page.
......
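
In rtr_check_discard_page() above, the match check now compares the whole page_id_t instead of only the page number. Within one index the space id always agrees, so the visible effect is mainly one fewer id() call, but the comparison is also stricter: equal page numbers in different tablespaces no longer match. A tiny self-contained illustration, again with a simplified page_id_t:

    #include <cassert>
    #include <cstdint>

    struct page_id_t
    {
      uint32_t space, page_no;
      bool operator==(const page_id_t &o) const
      { return space == o.space && page_no == o.page_no; }
    };

    int main()
    {
      page_id_t a{1, 42}, b{2, 42};   /* same page number, different space */
      assert(a.page_no == b.page_no); /* the old page_no-only check matches */
      assert(!(a == b));              /* the full-id comparison does not */
    }
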
......@@ -3790,7 +3790,8 @@ ibuf_insert_to_index_page(
&page_cur);
ut_ad(!cmp_dtuple_rec(entry, rec, offsets));
lock_rec_restore_from_page_infimum(block, rec, block);
lock_rec_restore_from_page_infimum(*block, rec,
block->page.id());
} else {
offsets = NULL;
ibuf_insert_to_index_page_low(entry, block, index,
......
......@@ -112,28 +112,17 @@ lock_update_merge_right(
const buf_block_t* left_block); /*!< in: merged index
page which will be
discarded */
/*************************************************************//**
Updates the lock table when the root page is copied to another in
btr_root_raise_and_insert. Note that we leave lock structs on the
/** Update locks when the root page is copied to another in
btr_root_raise_and_insert(). Note that we leave lock structs on the
root page, even though they do not make sense on other than leaf
pages: the reason is that in a pessimistic update the infimum record
of the root page will act as a dummy carrier of the locks of the record
to be updated. */
void
lock_update_root_raise(
/*===================*/
const buf_block_t* block, /*!< in: index page to which copied */
const buf_block_t* root); /*!< in: root page */
/*************************************************************//**
Updates the lock table when a page is copied to another and the original page
is removed from the chain of leaf pages, except if page is the root! */
void
lock_update_copy_and_discard(
/*=========================*/
const buf_block_t* new_block, /*!< in: index page to
which copied */
const buf_block_t* block); /*!< in: index page;
NOT the root! */
void lock_update_root_raise(const buf_block_t &block, const page_id_t root);
/** Update the lock table when a page is copied to another.
@param new_block the target page
@param old old page (not index root page) */
void lock_update_copy_and_discard(const buf_block_t &new_block, page_id_t old);
/*************************************************************//**
Updates the lock table when a page is split to the left. */
void
......@@ -141,18 +130,12 @@ lock_update_split_left(
/*===================*/
const buf_block_t* right_block, /*!< in: right page */
const buf_block_t* left_block); /*!< in: left page */
/*************************************************************//**
Updates the lock table when a page is merged to the left. */
void
lock_update_merge_left(
/*===================*/
const buf_block_t* left_block, /*!< in: left page to
which merged */
const rec_t* orig_pred, /*!< in: original predecessor
of supremum on the left page
before merge */
const buf_block_t* right_block); /*!< in: merged index page
which will be discarded */
/** Update the lock table when a page is merged to the left.
@param left left page
@param orig_pred original predecessor of supremum on the left page before merge
@param right merged, to-be-discarded right page */
void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred,
const page_id_t right);
/*************************************************************//**
Updates the lock table when a page is split and merged to
two pages. */
......@@ -169,9 +152,9 @@ inherited from rec. */
void
lock_rec_reset_and_inherit_gap_locks(
/*=================================*/
const buf_block_t* heir_block, /*!< in: block containing the
const buf_block_t& heir_block, /*!< in: block containing the
record which inherits */
const buf_block_t* block, /*!< in: block containing the
const page_id_t donor, /*!< in: page containing the
record from which inherited;
does NOT reset the locks on
this record */
......@@ -220,20 +203,14 @@ lock_rec_store_on_page_infimum(
record of the same page; lock
bits are reset on the
record */
/*********************************************************************//**
Restores the state of explicit lock requests on a single record, where the
state was stored on the infimum of the page. */
void
lock_rec_restore_from_page_infimum(
/*===============================*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec, /*!< in: record whose lock state
is restored */
const buf_block_t* donator);/*!< in: page (rec is not
necessarily on this page)
whose infimum stored the lock
state; lock bits are reset on
the infimum */
/** Restore the explicit lock requests on a single record, where the
state was stored on the infimum of a page.
@param block buffer block containing rec
@param rec record whose lock state is restored
@param donator page (rec is not necessarily on this page)
whose infimum stored the lock state; lock bits are reset on the infimum */
void lock_rec_restore_from_page_infimum(const buf_block_t &block,
const rec_t *rec, page_id_t donator);
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate insert of
a record. If they do, first tests if the query thread should anyway
......@@ -266,8 +243,6 @@ lock queue.
dberr_t
lock_clust_rec_modify_check_and_lock(
/*=================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: record which should be
modified */
......@@ -423,7 +398,7 @@ lock_rec_unlock(
/*============*/
trx_t* trx, /*!< in/out: transaction that has
set a record lock */
const buf_block_t* block, /*!< in: buffer block containing rec */
const page_id_t id, /*!< in: page containing rec */
const rec_t* rec, /*!< in: record */
lock_mode lock_mode);/*!< in: LOCK_S or LOCK_X */
......@@ -589,16 +564,11 @@ lock_trx_has_sys_table_locks(
/** Check if the transaction holds an explicit exclusive lock on a record.
@param[in] trx transaction
@param[in] table table
@param[in] block leaf page
@param[in] id leaf page identifier
@param[in] heap_no heap number identifying the record
@return whether an explicit X-lock is held */
bool
lock_trx_has_expl_x_lock(
const trx_t* trx, /*!< in: transaction to check */
const dict_table_t* table, /*!< in: table to check */
const buf_block_t* block, /*!< in: buffer block of the record */
ulint heap_no)/*!< in: record heap number */
MY_ATTRIBUTE((nonnull, warn_unused_result));
bool lock_trx_has_expl_x_lock(const trx_t &trx, const dict_table_t &table,
page_id_t id, ulint heap_no);
#endif /* UNIV_DEBUG */
/** Lock operation struct */
......@@ -811,6 +781,7 @@ lock_rec_create_low(
dict_index_t* index,
trx_t* trx,
bool holds_trx_mutex);
/** Enqueue a waiting request for a lock which cannot be granted immediately.
Check for deadlocks.
@param[in] type_mode the requested lock mode (LOCK_S or LOCK_X)
......@@ -820,7 +791,8 @@ Check for deadlocks.
waiting lock request is set
when performing an insert of
an index record
@param[in] block leaf page in the index
@param[in] id page identifier
@param[in] page leaf page in the index
@param[in] heap_no record heap number in the block
@param[in] index index tree
@param[in,out] thr query thread
......@@ -833,7 +805,8 @@ lock_rec_enqueue_waiting(
lock_t* c_lock, /*!< conflicting lock */
#endif
unsigned type_mode,
const buf_block_t* block,
const page_id_t id,
const page_t* page,
ulint heap_no,
dict_index_t* index,
que_thr_t* thr,
......@@ -851,14 +824,10 @@ lock_rtr_move_rec_list(
moved */
ulint num_move); /*!< in: num of rec to move */
/*************************************************************//**
Removes record lock objects set on an index page which is discarded. This
/** Remove record locks for an index page which is discarded. This
function does not move locks, or check for waiting locks, therefore the
lock bitmaps must already be reset when this function is called. */
void
lock_rec_free_all_from_discard_page(
/*================================*/
const buf_block_t* block); /*!< in: page to be discarded */
void lock_rec_free_all_from_discard_page(const page_id_t page_id);
/** Cancel a waiting lock request and release possibly waiting transactions */
void lock_cancel_waiting_and_release(lock_t *lock);
......
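
The lock0lock.h hunks above follow one convention: entry points that merely name a page now take a page_id_t by value, while entry points that dereference page contents take a const buf_block_t& instead of a pointer, so the non-null expectation becomes part of the signature. A hedged sketch of the call-site effect, reusing the stand-in types from the first example (the function names here are hypothetical):

    /* Sketch of the pointer-to-reference migration (stand-in types,
    hypothetical function names). */
    #include <cstdint>

    struct page_id_t { uint32_t space, page_no; };
    struct buf_page_t { page_id_t id_; page_id_t id() const { return id_; } };
    struct buf_block_t { buf_page_t page; };

    /* old shape: two pointers, page identity re-derived inside */
    void update_old(const buf_block_t *new_block, const buf_block_t *block) {}
    /* new shape: a reference that cannot be null, plus a cheap value */
    void update_new(const buf_block_t &new_block, page_id_t old_id) {}

    int main()
    {
      buf_block_t nb{{{1, 2}}}, ob{{{1, 3}}};
      update_old(&nb, &ob);          /* before */
      update_new(nb, ob.page.id());  /* after */
    }
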
......@@ -181,8 +181,7 @@ lock_prdt_rec_move(
/*===============*/
const buf_block_t* receiver, /*!< in: buffer block containing
the receiving record */
const buf_block_t* donator); /*!< in: buffer block containing
the donating record */
const page_id_t donator); /*!< in: donating page */
/** Check whether there are R-tree Page lock on a page
@param[in] trx trx to test the lock
......@@ -191,12 +190,9 @@ lock_prdt_rec_move(
bool lock_test_prdt_page_lock(const trx_t *trx, const page_id_t page_id);
/** Removes predicate lock objects set on an index page which is discarded.
@param[in] block page to be discarded
@param[in] id page to be discarded
@param[in] lock_hash lock hash */
void
lock_prdt_page_free_from_discard(
/*=============================*/
const buf_block_t* block,
hash_table_t* lock_hash);
lock_prdt_page_free_from_discard(const page_id_t id, hash_table_t *lock_hash);
#endif
......@@ -527,16 +527,21 @@ lock_rec_get_next_const(
ulint heap_no,/*!< in: heap number of the record */
const lock_t* lock); /*!< in: lock */
/*********************************************************************//**
Gets the first explicit lock request on a record.
@return first lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_first(
/*===============*/
hash_table_t* hash, /*!< in: hash chain the lock on */
const buf_block_t* block, /*!< in: block containing the record */
ulint heap_no);/*!< in: heap number of the record */
/** Get the first explicit lock request on a record.
@param hash lock hash table
@param id page identifier
@param heap_no record identifier in page
@return first lock
@retval nullptr if none exists */
inline lock_t*
lock_rec_get_first(hash_table_t *hash, const page_id_t id, ulint heap_no)
{
for (lock_t *lock= lock_sys.get_first(*hash, id);
lock; lock= lock_rec_get_next_on_page(lock))
if (lock_rec_get_nth_bit(lock, heap_no))
return lock;
return nullptr;
}
/*********************************************************************//**
Calculates if lock mode 1 is compatible with lock mode 2.
......
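
The inline lock_rec_get_first() above now takes the page_id_t directly; its previous out-of-line version, which derived the id from a buf_block_t, is deleted in the next hunk. The loop walks the chain of locks hashed to one page and returns the first lock whose bitmap covers heap_no: an InnoDB record lock object spans a whole page, with one bit per record heap number. The same lookup shape in a self-contained toy model (illustrative names only):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct toy_lock
    {
      std::vector<bool> bitmap;         /* one bit per record heap number */
      toy_lock *next_on_page= nullptr;  /* next lock hashed to the same page */
      bool covers(std::size_t heap_no) const
      { return heap_no < bitmap.size() && bitmap[heap_no]; }
    };

    /* analogous to lock_rec_get_first(hash, id, heap_no) */
    toy_lock *first_covering(toy_lock *first_on_page, std::size_t heap_no)
    {
      for (toy_lock *l= first_on_page; l; l= l->next_on_page)
        if (l->covers(heap_no))
          return l;
      return nullptr;
    }

    int main()
    {
      toy_lock a{std::vector<bool>(8)}, b{std::vector<bool>(8)};
      a.next_on_page= &b;
      b.bitmap[5]= true;
      assert(first_covering(&a, 5) == &b);  /* skips a, finds b */
      assert(!first_covering(&a, 3));
    }
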
......@@ -136,25 +136,7 @@ lock_rec_get_next_const(
ulint heap_no,/*!< in: heap number of the record */
const lock_t* lock) /*!< in: lock */
{
return(lock_rec_get_next(heap_no, (lock_t*) lock));
}
/*********************************************************************//**
Gets the first explicit lock request on a record.
@return first lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_first(
/*===============*/
hash_table_t* hash, /*!< in: hash chain the lock on */
const buf_block_t* block, /*!< in: block containing the record */
ulint heap_no)/*!< in: heap number of the record */
{
for (lock_t *lock= lock_sys.get_first(*hash, block->page.id());
lock; lock= lock_rec_get_next_on_page(lock))
if (lock_rec_get_nth_bit(lock, heap_no))
return lock;
return nullptr;
return lock_rec_get_next(heap_no, const_cast<lock_t*>(lock));
}
/*********************************************************************//**
......@@ -192,7 +174,7 @@ lock_rec_get_next_on_page_const(
{
ut_ad(!lock->is_table());
const page_id_t page_id(lock->un_member.rec_lock.page_id);
const page_id_t page_id{lock->un_member.rec_lock.page_id};
lock_sys.mutex_assert_locked();
while (!!(lock= static_cast<const lock_t*>(HASH_GET_NEXT(hash, lock))))
......
......@@ -793,17 +793,15 @@ lock_rec_has_expl(
LOCK_REC_NOT_GAP; for a
supremum record we always regard
this as a gap-type request */
const buf_block_t* block, /*!< in: buffer block containing
the record */
const page_id_t id, /*!< in: page identifier */
ulint heap_no,/*!< in: heap number of the record */
const trx_t* trx) /*!< in: transaction */
{
lock_sys.mutex_assert_locked();
ut_ad((precise_mode & LOCK_MODE_MASK) == LOCK_S
|| (precise_mode & LOCK_MODE_MASK) == LOCK_X);
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
for (lock_t *lock= lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
for (lock_t *lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock; lock= lock_rec_get_next(heap_no, lock))
if (lock->trx == trx &&
!(lock->type_mode & (LOCK_WAIT | LOCK_INSERT_INTENTION)) &&
......@@ -826,8 +824,7 @@ lock_t*
lock_rec_other_has_expl_req(
/*========================*/
lock_mode mode, /*!< in: LOCK_S or LOCK_X */
const buf_block_t* block, /*!< in: buffer block containing
the record */
const page_id_t id, /*!< in: page identifier */
bool wait, /*!< in: whether also waiting locks
are taken into account */
ulint heap_no,/*!< in: heap number of the record */
......@@ -845,8 +842,7 @@ lock_rec_other_has_expl_req(
return(NULL);
}
for (lock_t* lock = lock_rec_get_first(&lock_sys.rec_hash,
block, heap_no);
for (lock_t* lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
......@@ -934,8 +930,7 @@ lock_rec_other_has_conflicting(
possibly ORed to LOCK_GAP or
LOC_REC_NOT_GAP,
LOCK_INSERT_INTENTION */
const buf_block_t* block, /*!< in: buffer block containing
the record */
const page_id_t id, /*!< in: page identifier */
ulint heap_no,/*!< in: heap number of the record */
const trx_t* trx) /*!< in: our transaction */
{
......@@ -945,7 +940,7 @@ lock_rec_other_has_conflicting(
bool is_supremum = (heap_no == PAGE_HEAP_NO_SUPREMUM);
for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
for (lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
......@@ -1301,7 +1296,8 @@ Check for deadlocks.
waiting lock request is set
when performing an insert of
an index record
@param[in] block leaf page in the index
@param[in] id page identifier
@param[in] page leaf page in the index
@param[in] heap_no record heap number in the block
@param[in] index index tree
@param[in,out] thr query thread
......@@ -1314,7 +1310,8 @@ lock_rec_enqueue_waiting(
lock_t* c_lock, /*!< conflicting lock */
#endif
unsigned type_mode,
const buf_block_t* block,
const page_id_t id,
const page_t* page,
ulint heap_no,
dict_index_t* index,
que_thr_t* thr,
......@@ -1348,11 +1345,11 @@ lock_rec_enqueue_waiting(
/* Enqueue the lock request that will wait to be granted; note that
we already own the trx mutex. */
lock_t* lock = lock_rec_create(
lock_t* lock = lock_rec_create_low(
#ifdef WITH_WSREP
c_lock, thr,
#endif
type_mode | LOCK_WAIT, block, heap_no, index, trx, TRUE);
type_mode | LOCK_WAIT, id, page, heap_no, index, trx, true);
if (prdt && type_mode & LOCK_PREDICATE) {
lock_prdt_set_prdt(lock, prdt);
......@@ -1415,7 +1412,8 @@ lock_rec_add_to_queue(
/*==================*/
unsigned type_mode,/*!< in: lock mode, wait, gap
etc. flags */
const buf_block_t* block, /*!< in: buffer block containing
const page_id_t id, /*!< in: page identifier */
const page_t* page, /*!< in: buffer block containing
the record */
ulint heap_no,/*!< in: heap number of the record */
dict_index_t* index, /*!< in: index of record */
......@@ -1444,7 +1442,7 @@ lock_rec_add_to_queue(
: LOCK_S;
const lock_t* other_lock
= lock_rec_other_has_expl_req(
mode, block, false, heap_no, trx);
mode, id, false, heap_no, trx);
#ifdef WITH_WSREP
if (UNIV_LIKELY_NULL(other_lock) && trx->is_wsrep()) {
/* Only BF transaction may be granted lock
......@@ -1476,46 +1474,37 @@ lock_rec_add_to_queue(
type_mode &= ~(LOCK_GAP | LOCK_REC_NOT_GAP);
}
lock_t* lock;
lock_t* first_lock;
/* Look for a waiting lock request on the same record or on a gap */
for (first_lock = lock = lock_sys.get_first(*lock_hash_get(type_mode),
block->page.id());
lock != NULL;
lock = lock_rec_get_next_on_page(lock)) {
if (lock->is_waiting()
&& lock_rec_get_nth_bit(lock, heap_no)) {
goto create;
if (type_mode & LOCK_WAIT) {
goto create;
} else if (lock_t *first_lock =
lock_sys.get_first(*lock_hash_get(type_mode), id)) {
for (lock_t* lock = first_lock;;) {
if (lock->is_waiting()
&& lock_rec_get_nth_bit(lock, heap_no)) {
goto create;
}
if (!(lock = lock_rec_get_next_on_page(lock))) {
break;
}
}
}
if (first_lock && !(type_mode & LOCK_WAIT)) {
/* Look for a similar record lock on the same page:
if one is found and there are no waiting lock requests,
we can just set the bit */
lock = lock_rec_find_similar_on_page(
type_mode, heap_no, first_lock, trx);
if (lock != NULL) {
if (lock_t* lock = lock_rec_find_similar_on_page(
type_mode, heap_no, first_lock, trx)) {
lock_rec_set_nth_bit(lock, heap_no);
return;
}
}
create:
lock_rec_create(
lock_rec_create_low(
#ifdef WITH_WSREP
NULL, NULL,
#endif
type_mode, block, heap_no, index, trx, caller_owns_trx_mutex);
type_mode, id, page, heap_no, index, trx,
caller_owns_trx_mutex);
}
/*********************************************************************//**
......@@ -1574,13 +1563,13 @@ lock_rec_lock(
lock_rec_get_n_bits(lock) <= heap_no)
{
/* Do nothing if the trx already has a strong enough lock on rec */
if (!lock_rec_has_expl(mode, block, heap_no, trx))
if (!lock_rec_has_expl(mode, id, heap_no, trx))
{
if (
#ifdef WITH_WSREP
lock_t *c_lock=
#endif
lock_rec_other_has_conflicting(mode, block, heap_no, trx))
lock_rec_other_has_conflicting(mode, id, heap_no, trx))
{
/*
If another transaction has a non-gap conflicting
......@@ -1591,12 +1580,13 @@ lock_rec_lock(
#ifdef WITH_WSREP
c_lock,
#endif /* WITH_WSREP */
mode, block, heap_no, index, thr, NULL);
mode, id, block->frame, heap_no, index, thr, nullptr);
}
else if (!impl)
{
/* Set the requested lock on the record. */
lock_rec_add_to_queue(mode, block, heap_no, index, trx, true);
lock_rec_add_to_queue(mode, id, block->frame, heap_no, index,
trx, true);
err= DB_SUCCESS_LOCKED_REC;
}
}
......@@ -2025,16 +2015,11 @@ static void lock_rec_free_all_from_discard_page_low(const page_id_t id,
}
}
/*************************************************************//**
Removes record lock objects set on an index page which is discarded. This
/** Remove record locks for an index page which is discarded. This
function does not move locks, or check for waiting locks, therefore the
lock bitmaps must already be reset when this function is called. */
void
lock_rec_free_all_from_discard_page(
/*================================*/
const buf_block_t* block) /*!< in: page to be discarded */
void lock_rec_free_all_from_discard_page(const page_id_t page_id)
{
const page_id_t page_id(block->page.id());
lock_rec_free_all_from_discard_page_low(page_id, &lock_sys.rec_hash);
lock_rec_free_all_from_discard_page_low(page_id, &lock_sys.prdt_hash);
lock_rec_free_all_from_discard_page_low(page_id, &lock_sys.prdt_page_hash);
......@@ -2050,11 +2035,10 @@ void
lock_rec_reset_and_release_wait_low(
/*================================*/
hash_table_t* hash, /*!< in: hash table */
const buf_block_t* block, /*!< in: buffer block containing
the record */
const page_id_t id, /*!< in: page identifier */
ulint heap_no)/*!< in: heap number of record */
{
for (lock_t *lock= lock_rec_get_first(hash, block, heap_no); lock;
for (lock_t *lock= lock_rec_get_first(hash, id, heap_no); lock;
lock= lock_rec_get_next(heap_no, lock))
if (lock->is_waiting())
lock_rec_cancel(lock);
......@@ -2069,17 +2053,16 @@ static
void
lock_rec_reset_and_release_wait(
/*============================*/
const buf_block_t* block, /*!< in: buffer block containing
the record */
const page_id_t id, /*!< in: page identifier */
ulint heap_no)/*!< in: heap number of record */
{
lock_rec_reset_and_release_wait_low(
&lock_sys.rec_hash, block, heap_no);
&lock_sys.rec_hash, id, heap_no);
lock_rec_reset_and_release_wait_low(
&lock_sys.prdt_hash, block, PAGE_HEAP_NO_INFIMUM);
&lock_sys.prdt_hash, id, PAGE_HEAP_NO_INFIMUM);
lock_rec_reset_and_release_wait_low(
&lock_sys.prdt_page_hash, block, PAGE_HEAP_NO_INFIMUM);
&lock_sys.prdt_page_hash, id, PAGE_HEAP_NO_INFIMUM);
}
/*************************************************************//**
......@@ -2091,19 +2074,18 @@ static
void
lock_rec_inherit_to_gap(
/*====================*/
const buf_block_t* heir_block, /*!< in: block containing the
const page_id_t heir, /*!< in: page containing the
record which inherits */
const buf_block_t* block, /*!< in: block containing the
const page_id_t id, /*!< in: page containing the
record from which inherited;
does NOT reset the locks on
this record */
const page_t* heir_page, /*!< in: heir page frame */
ulint heir_heap_no, /*!< in: heap_no of the
inheriting record */
ulint heap_no) /*!< in: heap_no of the
donating record */
{
lock_t* lock;
lock_sys.mutex_assert_locked();
/* At READ UNCOMMITTED or READ COMMITTED isolation level,
......@@ -2112,7 +2094,7 @@ lock_rec_inherit_to_gap(
DO want S-locks/X-locks(taken for replace) set by a consistency
constraint to be inherited also then. */
for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
for (lock_t* lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
......@@ -2121,7 +2103,8 @@ lock_rec_inherit_to_gap(
|| lock->mode() !=
(lock->trx->duplicates ? LOCK_S : LOCK_X))) {
lock_rec_add_to_queue(LOCK_GAP | lock->mode(),
heir_block, heir_heap_no,
heir, heir_page,
heir_heap_no,
lock->index, lock->trx, false);
}
}
......@@ -2143,26 +2126,16 @@ lock_rec_inherit_to_gap_if_gap_lock(
does NOT reset the locks
on this record */
{
lock_t* lock;
lock_sys.mutex_lock();
for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
lock != NULL;
lock = lock_rec_get_next(heap_no, lock)) {
if (!lock->is_insert_intention()
&& (heap_no == PAGE_HEAP_NO_SUPREMUM
|| !lock->is_record_not_gap())
&& !lock_table_has(lock->trx, lock->index->table,
LOCK_X)) {
lock_rec_add_to_queue(LOCK_GAP | lock->mode(), block,
heir_heap_no,
lock->index, lock->trx, false);
}
}
const page_id_t id{block->page.id()};
LockMutexGuard g;
lock_sys.mutex_unlock();
for (lock_t *lock= lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock; lock= lock_rec_get_next(heap_no, lock))
if (!lock->is_insert_intention() && (heap_no == PAGE_HEAP_NO_SUPREMUM ||
!lock->is_record_not_gap()) &&
!lock_table_has(lock->trx, lock->index->table, LOCK_X))
lock_rec_add_to_queue(LOCK_GAP | lock->mode(), id, block->frame,
heir_heap_no, lock->index, lock->trx, false);
}
/*************************************************************//**
......@@ -2173,9 +2146,9 @@ void
lock_rec_move_low(
/*==============*/
hash_table_t* lock_hash, /*!< in: hash table to use */
const buf_block_t* receiver, /*!< in: buffer block containing
const buf_block_t& receiver, /*!< in: buffer block containing
the receiving record */
const buf_block_t* donator, /*!< in: buffer block containing
const page_id_t donator_id, /*!< in: page identifier of
the donating record */
ulint receiver_heap_no,/*!< in: heap_no of the record
which gets the locks; there
......@@ -2184,18 +2157,17 @@ lock_rec_move_low(
ulint donator_heap_no)/*!< in: heap_no of the record
which gives the locks */
{
lock_t* lock;
const page_id_t receiver_id{receiver.page.id()};
lock_sys.mutex_assert_locked();
/* If the lock is predicate lock, it resides on INFIMUM record */
ut_ad(lock_rec_get_first(
lock_hash, receiver, receiver_heap_no) == NULL
ut_ad(!lock_rec_get_first(lock_hash, receiver_id, receiver_heap_no)
|| lock_hash == &lock_sys.prdt_hash
|| lock_hash == &lock_sys.prdt_page_hash);
for (lock = lock_rec_get_first(lock_hash,
donator, donator_heap_no);
for (lock_t *lock =
lock_rec_get_first(lock_hash, donator_id, donator_heap_no);
lock != NULL;
lock = lock_rec_get_next(donator_heap_no, lock)) {
......@@ -2208,14 +2180,15 @@ lock_rec_move_low(
}
/* Note that we FIRST reset the bit, and then set the lock:
the function works also if donator == receiver */
the function works also if donator_id == receiver_id */
lock_rec_add_to_queue(type_mode, receiver, receiver_heap_no,
lock_rec_add_to_queue(type_mode, receiver_id, receiver.frame,
receiver_heap_no,
lock->index, lock->trx, false);
}
ut_ad(!lock_rec_get_first(&lock_sys.rec_hash,
donator, donator_heap_no));
donator_id, donator_heap_no));
}
/** Move all the granted locks to the front of the given lock list.
......@@ -2258,9 +2231,9 @@ UNIV_INLINE
void
lock_rec_move(
/*==========*/
const buf_block_t* receiver, /*!< in: buffer block containing
const buf_block_t& receiver, /*!< in: buffer block containing
the receiving record */
const buf_block_t* donator, /*!< in: buffer block containing
const page_id_t donator_id, /*!< in: page identifier of
the donating record */
ulint receiver_heap_no,/*!< in: heap_no of the record
which gets the locks; there
......@@ -2269,8 +2242,8 @@ lock_rec_move(
ulint donator_heap_no)/*!< in: heap_no of the record
which gives the locks */
{
lock_rec_move_low(&lock_sys.rec_hash, receiver, donator,
receiver_heap_no, donator_heap_no);
lock_rec_move_low(&lock_sys.rec_hash, receiver, donator_id,
receiver_heap_no, donator_heap_no);
}
/*************************************************************//**
......@@ -2290,11 +2263,12 @@ lock_move_reorganize_page(
UT_LIST_BASE_NODE_T(lock_t) old_locks;
mem_heap_t* heap = NULL;
ulint comp;
const page_id_t id{block->page.id()};
lock_sys.mutex_lock();
/* FIXME: This needs to deal with predicate lock too */
lock = lock_sys.get_first(block->page.id());
lock = lock_sys.get_first(id);
if (lock == NULL) {
lock_sys.mutex_unlock();
......@@ -2380,7 +2354,8 @@ lock_move_reorganize_page(
small for the new heap number! */
lock_rec_add_to_queue(
lock->type_mode, block, new_heap_no,
lock->type_mode, id, block->frame,
new_heap_no,
lock->index, lock->trx, FALSE);
}
......@@ -2416,110 +2391,102 @@ lock_move_rec_list_end(
const rec_t* rec) /*!< in: record on page: this
is the first record moved */
{
lock_t* lock;
const ulint comp = page_rec_is_comp(rec);
ut_ad(buf_block_get_frame(block) == page_align(rec));
ut_ad(comp == page_is_comp(buf_block_get_frame(new_block)));
lock_sys.mutex_lock();
const ulint comp= page_rec_is_comp(rec);
/* Note: when we move locks from record to record, waiting locks
and possible granted gap type locks behind them are enqueued in
the original order, because new elements are appended to the end of
the hash chain, and lock_rec_add_to_queue
does not reuse locks if there are waiters in the queue. */
ut_ad(block->frame == page_align(rec));
ut_ad(comp == page_is_comp(new_block->frame));
for (lock = lock_sys.get_first(block->page.id());
lock;
lock = lock_rec_get_next_on_page(lock)) {
const rec_t* rec1 = rec;
const rec_t* rec2;
const auto type_mode = lock->type_mode;
if (comp) {
if (page_offset(rec1) == PAGE_NEW_INFIMUM) {
rec1 = page_rec_get_next_low(rec1, TRUE);
}
rec2 = page_rec_get_next_low(
buf_block_get_frame(new_block)
+ PAGE_NEW_INFIMUM, TRUE);
} else {
if (page_offset(rec1) == PAGE_OLD_INFIMUM) {
rec1 = page_rec_get_next_low(rec1, FALSE);
}
rec2 = page_rec_get_next_low(
buf_block_get_frame(new_block)
+ PAGE_OLD_INFIMUM, FALSE);
}
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
for (;;) {
ut_ad(page_rec_is_metadata(rec1)
== page_rec_is_metadata(rec2));
ut_d(const rec_t* const orec = rec1);
ulint rec1_heap_no;
ulint rec2_heap_no;
const page_id_t id{block->page.id()};
const page_id_t new_id{new_block->page.id()};
{
LockMutexGuard g;
if (comp) {
rec1_heap_no = rec_get_heap_no_new(rec1);
/* Note: when we move locks from record to record, waiting locks
and possible granted gap type locks behind them are enqueued in
the original order, because new elements are appended to the end of
the hash chain, and lock_rec_add_to_queue
does not reuse locks if there are waiters in the queue. */
for (lock_t *lock= lock_sys.get_first(id); lock;
lock= lock_rec_get_next_on_page(lock))
{
const rec_t *rec1= rec;
const rec_t *rec2;
const auto type_mode= lock->type_mode;
if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM) {
break;
}
if (comp)
{
if (page_offset(rec1) == PAGE_NEW_INFIMUM)
rec1= page_rec_get_next_low(rec1, TRUE);
rec2= page_rec_get_next_low(new_block->frame + PAGE_NEW_INFIMUM, TRUE);
}
else
{
if (page_offset(rec1) == PAGE_OLD_INFIMUM)
rec1= page_rec_get_next_low(rec1, FALSE);
rec2= page_rec_get_next_low(new_block->frame + PAGE_OLD_INFIMUM, FALSE);
}
rec2_heap_no = rec_get_heap_no_new(rec2);
rec1 = page_rec_get_next_low(rec1, TRUE);
rec2 = page_rec_get_next_low(rec2, TRUE);
} else {
rec1_heap_no = rec_get_heap_no_old(rec1);
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
for (;;)
{
ut_ad(page_rec_is_metadata(rec1) == page_rec_is_metadata(rec2));
ut_d(const rec_t* const orec= rec1);
if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM) {
break;
}
ulint rec1_heap_no;
ulint rec2_heap_no;
rec2_heap_no = rec_get_heap_no_old(rec2);
if (comp)
{
rec1_heap_no= rec_get_heap_no_new(rec1);
if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM)
break;
ut_ad(rec_get_data_size_old(rec1)
== rec_get_data_size_old(rec2));
rec2_heap_no= rec_get_heap_no_new(rec2);
rec1= page_rec_get_next_low(rec1, TRUE);
rec2= page_rec_get_next_low(rec2, TRUE);
}
else
{
rec1_heap_no= rec_get_heap_no_old(rec1);
ut_ad(!memcmp(rec1, rec2,
rec_get_data_size_old(rec1)));
if (rec1_heap_no == PAGE_HEAP_NO_SUPREMUM)
break;
rec2_heap_no= rec_get_heap_no_old(rec2);
rec1 = page_rec_get_next_low(rec1, FALSE);
rec2 = page_rec_get_next_low(rec2, FALSE);
}
ut_ad(rec_get_data_size_old(rec1) == rec_get_data_size_old(rec2));
ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec1)));
if (rec1_heap_no < lock->un_member.rec_lock.n_bits
&& lock_rec_reset_nth_bit(lock, rec1_heap_no)) {
ut_ad(!page_rec_is_metadata(orec));
rec1= page_rec_get_next_low(rec1, FALSE);
rec2= page_rec_get_next_low(rec2, FALSE);
}
if (type_mode & LOCK_WAIT) {
ut_ad(lock->trx->lock.wait_lock==lock);
lock->type_mode &= ~LOCK_WAIT;
}
if (rec1_heap_no < lock->un_member.rec_lock.n_bits &&
lock_rec_reset_nth_bit(lock, rec1_heap_no))
{
ut_ad(!page_rec_is_metadata(orec));
lock_rec_add_to_queue(
type_mode, new_block, rec2_heap_no,
lock->index, lock->trx, FALSE);
}
}
}
if (type_mode & LOCK_WAIT)
{
ut_ad(lock->trx->lock.wait_lock == lock);
lock->type_mode&= ~LOCK_WAIT;
}
lock_sys.mutex_unlock();
lock_rec_add_to_queue(type_mode, new_id, new_block->frame,
rec2_heap_no, lock->index, lock->trx, false);
}
}
}
}
#ifdef UNIV_DEBUG_LOCK_VALIDATE
if (fil_space_t* space = fil_space_t::get(page_id.space())) {
const bool is_latched{space->is_latched()};
ut_ad(lock_rec_validate_page(block, is_latched));
ut_ad(lock_rec_validate_page(new_block, is_latched));
space->release();
}
if (fil_space_t *space= fil_space_t::get(id.space()))
{
const bool is_latched{space->is_latched()};
ut_ad(lock_rec_validate_page(block, is_latched));
ut_ad(lock_rec_validate_page(new_block, is_latched));
space->release();
}
#endif
}
......@@ -2541,99 +2508,92 @@ lock_move_rec_list_start(
before the records
were copied */
{
lock_t* lock;
const ulint comp = page_rec_is_comp(rec);
const ulint comp= page_rec_is_comp(rec);
ut_ad(block->frame == page_align(rec));
ut_ad(new_block->frame == page_align(old_end));
ut_ad(comp == page_rec_is_comp(old_end));
ut_ad(!page_rec_is_metadata(rec));
ut_ad(block->frame == page_align(rec));
ut_ad(comp == page_is_comp(new_block->frame));
ut_ad(new_block->frame == page_align(old_end));
ut_ad(!page_rec_is_metadata(rec));
const page_id_t id{block->page.id()};
const page_id_t new_id{new_block->page.id()};
lock_sys.mutex_lock();
{
LockMutexGuard g;
for (lock = lock_sys.get_first(block->page.id());
lock;
lock = lock_rec_get_next_on_page(lock)) {
const rec_t* rec1;
const rec_t* rec2;
const auto type_mode = lock->type_mode;
if (comp) {
rec1 = page_rec_get_next_low(
buf_block_get_frame(block)
+ PAGE_NEW_INFIMUM, TRUE);
rec2 = page_rec_get_next_low(old_end, TRUE);
} else {
rec1 = page_rec_get_next_low(
buf_block_get_frame(block)
+ PAGE_OLD_INFIMUM, FALSE);
rec2 = page_rec_get_next_low(old_end, FALSE);
}
for (lock_t *lock= lock_sys.get_first(id); lock;
lock= lock_rec_get_next_on_page(lock))
{
const rec_t *rec1;
const rec_t *rec2;
const auto type_mode= lock->type_mode;
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
if (comp)
{
rec1= page_rec_get_next_low(block->frame + PAGE_NEW_INFIMUM, TRUE);
rec2= page_rec_get_next_low(old_end, TRUE);
}
else
{
rec1= page_rec_get_next_low(block->frame + PAGE_OLD_INFIMUM, FALSE);
rec2= page_rec_get_next_low(old_end, FALSE);
}
while (rec1 != rec) {
ut_ad(page_rec_is_metadata(rec1)
== page_rec_is_metadata(rec2));
ut_d(const rec_t* const prev = rec1);
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
ulint rec1_heap_no;
ulint rec2_heap_no;
while (rec1 != rec)
{
ut_ad(page_rec_is_metadata(rec1) == page_rec_is_metadata(rec2));
ut_d(const rec_t* const prev= rec1);
if (comp) {
rec1_heap_no = rec_get_heap_no_new(rec1);
rec2_heap_no = rec_get_heap_no_new(rec2);
ulint rec1_heap_no;
ulint rec2_heap_no;
rec1 = page_rec_get_next_low(rec1, TRUE);
rec2 = page_rec_get_next_low(rec2, TRUE);
} else {
rec1_heap_no = rec_get_heap_no_old(rec1);
rec2_heap_no = rec_get_heap_no_old(rec2);
if (comp)
{
rec1_heap_no= rec_get_heap_no_new(rec1);
rec2_heap_no= rec_get_heap_no_new(rec2);
ut_ad(!memcmp(rec1, rec2,
rec_get_data_size_old(rec2)));
rec1= page_rec_get_next_low(rec1, TRUE);
rec2= page_rec_get_next_low(rec2, TRUE);
}
else
{
rec1_heap_no= rec_get_heap_no_old(rec1);
rec2_heap_no= rec_get_heap_no_old(rec2);
rec1 = page_rec_get_next_low(rec1, FALSE);
rec2 = page_rec_get_next_low(rec2, FALSE);
}
ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2)));
if (rec1_heap_no < lock->un_member.rec_lock.n_bits
&& lock_rec_reset_nth_bit(lock, rec1_heap_no)) {
ut_ad(!page_rec_is_metadata(prev));
rec1= page_rec_get_next_low(rec1, FALSE);
rec2= page_rec_get_next_low(rec2, FALSE);
}
if (type_mode & LOCK_WAIT) {
ut_ad(lock->trx->lock.wait_lock==lock);
lock->type_mode &= ~LOCK_WAIT;
}
if (rec1_heap_no < lock->un_member.rec_lock.n_bits &&
lock_rec_reset_nth_bit(lock, rec1_heap_no))
{
ut_ad(!page_rec_is_metadata(prev));
lock_rec_add_to_queue(
type_mode, new_block, rec2_heap_no,
lock->index, lock->trx, FALSE);
}
}
if (type_mode & LOCK_WAIT)
{
ut_ad(lock->trx->lock.wait_lock == lock);
lock->type_mode&= ~LOCK_WAIT;
}
lock_rec_add_to_queue(type_mode, new_id, new_block->frame,
rec2_heap_no, lock->index, lock->trx, false);
}
}
#ifdef UNIV_DEBUG
if (page_rec_is_supremum(rec)) {
ulint i;
for (i = PAGE_HEAP_NO_USER_LOW;
i < lock_rec_get_n_bits(lock); i++) {
if (lock_rec_get_nth_bit(lock, i)) {
ib::fatal()
<< "lock_move_rec_list_start():"
<< i << " not moved in "
<< (void*) lock;
}
}
}
if (page_rec_is_supremum(rec))
for (auto i= lock_rec_get_n_bits(lock); --i > PAGE_HEAP_NO_USER_LOW; )
ut_ad(!lock_rec_get_nth_bit(lock, i));
#endif /* UNIV_DEBUG */
}
lock_sys.mutex_unlock();
}
}
#ifdef UNIV_DEBUG_LOCK_VALIDATE
ut_ad(lock_rec_validate_page(block));
ut_ad(lock_rec_validate_page(block));
#endif
}
......@@ -2650,75 +2610,73 @@ lock_rtr_move_rec_list(
moved */
ulint num_move) /*!< in: num of rec to move */
{
lock_t* lock;
ulint comp;
if (!num_move) {
return;
}
comp = page_rec_is_comp(rec_move[0].old_rec);
if (!num_move)
return;
ut_ad(block->frame == page_align(rec_move[0].old_rec));
ut_ad(new_block->frame == page_align(rec_move[0].new_rec));
ut_ad(comp == page_rec_is_comp(rec_move[0].new_rec));
const ulint comp= page_rec_is_comp(rec_move[0].old_rec);
lock_sys.mutex_lock();
for (lock = lock_sys.get_first(block->page.id());
lock;
lock = lock_rec_get_next_on_page(lock)) {
ulint moved = 0;
const rec_t* rec1;
const rec_t* rec2;
const auto type_mode = lock->type_mode;
ut_ad(block->frame == page_align(rec_move[0].old_rec));
ut_ad(new_block->frame == page_align(rec_move[0].new_rec));
ut_ad(comp == page_rec_is_comp(rec_move[0].new_rec));
const page_id_t id{block->page.id()};
const page_id_t new_id{new_block->page.id()};
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
{
LockMutexGuard g;
while (moved < num_move) {
ulint rec1_heap_no;
ulint rec2_heap_no;
for (lock_t *lock= lock_sys.get_first(id); lock;
lock= lock_rec_get_next_on_page(lock))
{
const rec_t *rec1;
const rec_t *rec2;
const auto type_mode= lock->type_mode;
rec1 = rec_move[moved].old_rec;
rec2 = rec_move[moved].new_rec;
ut_ad(!page_rec_is_metadata(rec1));
ut_ad(!page_rec_is_metadata(rec2));
/* Copy lock requests on user records to new page and
reset the lock bits on the old */
if (comp) {
rec1_heap_no = rec_get_heap_no_new(rec1);
rec2_heap_no = rec_get_heap_no_new(rec2);
for (ulint moved= 0; moved < num_move; moved++)
{
ulint rec1_heap_no;
ulint rec2_heap_no;
} else {
rec1_heap_no = rec_get_heap_no_old(rec1);
rec2_heap_no = rec_get_heap_no_old(rec2);
rec1= rec_move[moved].old_rec;
rec2= rec_move[moved].new_rec;
ut_ad(!page_rec_is_metadata(rec1));
ut_ad(!page_rec_is_metadata(rec2));
ut_ad(!memcmp(rec1, rec2,
rec_get_data_size_old(rec2)));
}
if (comp)
{
rec1_heap_no= rec_get_heap_no_new(rec1);
rec2_heap_no= rec_get_heap_no_new(rec2);
}
else
{
rec1_heap_no= rec_get_heap_no_old(rec1);
rec2_heap_no= rec_get_heap_no_old(rec2);
if (rec1_heap_no < lock->un_member.rec_lock.n_bits
&& lock_rec_reset_nth_bit(lock, rec1_heap_no)) {
if (type_mode & LOCK_WAIT) {
ut_ad(lock->trx->lock.wait_lock==lock);
lock->type_mode &= ~LOCK_WAIT;
}
ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2)));
}
lock_rec_add_to_queue(
type_mode, new_block, rec2_heap_no,
lock->index, lock->trx, FALSE);
if (rec1_heap_no < lock->un_member.rec_lock.n_bits &&
lock_rec_reset_nth_bit(lock, rec1_heap_no))
{
if (type_mode & LOCK_WAIT)
{
ut_ad(lock->trx->lock.wait_lock == lock);
lock->type_mode&= ~LOCK_WAIT;
}
rec_move[moved].moved = true;
}
lock_rec_add_to_queue(type_mode, new_id, new_block->frame,
rec2_heap_no, lock->index, lock->trx, false);
moved++;
}
rec_move[moved].moved= true;
}
lock_sys.mutex_unlock();
}
}
}
#ifdef UNIV_DEBUG_LOCK_VALIDATE
ut_ad(lock_rec_validate_page(block));
ut_ad(lock_rec_validate_page(block));
#endif
}
/*************************************************************//**
......@@ -2729,23 +2687,20 @@ lock_update_split_right(
const buf_block_t* right_block, /*!< in: right page */
const buf_block_t* left_block) /*!< in: left page */
{
ulint heap_no = lock_get_min_heap_no(right_block);
lock_sys.mutex_lock();
const ulint h= lock_get_min_heap_no(right_block);
const page_id_t l{left_block->page.id()};
const page_id_t r{right_block->page.id()};
/* Move the locks on the supremum of the left page to the supremum
of the right page */
lock_rec_move(right_block, left_block,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
LockMutexGuard g;
/* Inherit the locks to the supremum of left page from the successor
of the infimum on right page */
/* Move the locks on the supremum of the left page to the supremum
of the right page */
lock_rec_inherit_to_gap(left_block, right_block,
PAGE_HEAP_NO_SUPREMUM, heap_no);
lock_rec_move(*right_block, l, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_sys.mutex_unlock();
/* Inherit the locks to the supremum of left page from the successor
of the infimum on right page */
lock_rec_inherit_to_gap(l, r, left_block->frame, PAGE_HEAP_NO_SUPREMUM, h);
}
/*************************************************************//**
......@@ -2765,13 +2720,15 @@ lock_update_merge_right(
{
ut_ad(!page_rec_is_metadata(orig_succ));
lock_sys.mutex_lock();
const page_id_t l{left_block->page.id()};
const page_id_t r{right_block->page.id()};
LockMutexGuard g;
/* Inherit the locks from the supremum of the left page to the
original successor of infimum on the right page, to which the left
page was merged */
lock_rec_inherit_to_gap(right_block, left_block,
lock_rec_inherit_to_gap(r, l, right_block->frame,
page_rec_get_heap_no(orig_succ),
PAGE_HEAP_NO_SUPREMUM);
......@@ -2779,61 +2736,37 @@ lock_update_merge_right(
waiting transactions */
lock_rec_reset_and_release_wait_low(
&lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM);
&lock_sys.rec_hash, l, PAGE_HEAP_NO_SUPREMUM);
/* there should exist no page lock on the left page,
otherwise, it will be blocked from merge */
ut_ad(!lock_sys.get_first_prdt_page(left_block->page.id()));
ut_ad(!lock_sys.get_first_prdt_page(l));
lock_rec_free_all_from_discard_page(left_block);
lock_sys.mutex_unlock();
lock_rec_free_all_from_discard_page(l);
}
/*************************************************************//**
Updates the lock table when the root page is copied to another in
btr_root_raise_and_insert. Note that we leave lock structs on the
/** Update locks when the root page is copied to another in
btr_root_raise_and_insert(). Note that we leave lock structs on the
root page, even though they do not make sense on other than leaf
pages: the reason is that in a pessimistic update the infimum record
of the root page will act as a dummy carrier of the locks of the record
to be updated. */
void
lock_update_root_raise(
/*===================*/
const buf_block_t* block, /*!< in: index page to which copied */
const buf_block_t* root) /*!< in: root page */
void lock_update_root_raise(const buf_block_t &block, const page_id_t root)
{
lock_sys.mutex_lock();
/* Move the locks on the supremum of the root to the supremum
of block */
lock_rec_move(block, root,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_sys.mutex_unlock();
LockMutexGuard g;
/* Move the locks on the supremum of the root to the supremum of block */
lock_rec_move(block, root, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
}
/*************************************************************//**
Updates the lock table when a page is copied to another and the original page
is removed from the chain of leaf pages, except if page is the root! */
void
lock_update_copy_and_discard(
/*=========================*/
const buf_block_t* new_block, /*!< in: index page to
which copied */
const buf_block_t* block) /*!< in: index page;
NOT the root! */
/** Update the lock table when a page is copied to another.
@param new_block the target page
@param old old page (not index root page) */
void lock_update_copy_and_discard(const buf_block_t &new_block, page_id_t old)
{
lock_sys.mutex_lock();
/* Move the locks on the supremum of the old page to the supremum
of new_page */
lock_rec_move(new_block, block,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(block);
lock_sys.mutex_unlock();
LockMutexGuard g;
/* Move the locks on the supremum of the old page to the supremum of new */
lock_rec_move(new_block, old, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_rec_free_all_from_discard_page(old);
}
/*************************************************************//**
......@@ -2844,69 +2777,51 @@ lock_update_split_left(
const buf_block_t* right_block, /*!< in: right page */
const buf_block_t* left_block) /*!< in: left page */
{
ulint heap_no = lock_get_min_heap_no(right_block);
lock_sys.mutex_lock();
/* Inherit the locks to the supremum of the left page from the
successor of the infimum on the right page */
lock_rec_inherit_to_gap(left_block, right_block,
PAGE_HEAP_NO_SUPREMUM, heap_no);
lock_sys.mutex_unlock();
ulint h= lock_get_min_heap_no(right_block);
const page_id_t l{left_block->page.id()};
const page_id_t r{right_block->page.id()};
LockMutexGuard g;
/* Inherit the locks to the supremum of the left page from the
successor of the infimum on the right page */
lock_rec_inherit_to_gap(l, r, left_block->frame, PAGE_HEAP_NO_SUPREMUM, h);
}
/*************************************************************//**
Updates the lock table when a page is merged to the left. */
void
lock_update_merge_left(
/*===================*/
const buf_block_t* left_block, /*!< in: left page to
which merged */
const rec_t* orig_pred, /*!< in: original predecessor
of supremum on the left page
before merge */
const buf_block_t* right_block) /*!< in: merged index page
which will be discarded */
/** Update the lock table when a page is merged to the left.
@param left left page
@param orig_pred original predecessor of supremum on the left page before merge
@param right merged, to-be-discarded right page */
void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred,
const page_id_t right)
{
const rec_t* left_next_rec;
ut_ad(left.frame == page_align(orig_pred));
ut_ad(left_block->frame == page_align(orig_pred));
lock_sys.mutex_lock();
left_next_rec = page_rec_get_next_const(orig_pred);
if (!page_rec_is_supremum(left_next_rec)) {
/* Inherit the locks on the supremum of the left page to the
first record which was moved from the right page */
lock_rec_inherit_to_gap(left_block, left_block,
page_rec_get_heap_no(left_next_rec),
PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page,
releasing waiting transactions */
lock_rec_reset_and_release_wait_low(
&lock_sys.rec_hash, left_block, PAGE_HEAP_NO_SUPREMUM);
}
const page_id_t l{left.page.id()};
/* Move the locks from the supremum of right page to the supremum
of the left page */
lock_rec_move(left_block, right_block,
PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
LockMutexGuard g;
const rec_t *left_next_rec= page_rec_get_next_const(orig_pred);
/* there should exist no page lock on the right page,
otherwise, it will be blocked from merge */
ut_ad(!lock_sys.get_first_prdt_page(right_block->page.id()));
if (!page_rec_is_supremum(left_next_rec))
{
/* Inherit the locks on the supremum of the left page to the
first record which was moved from the right page */
lock_rec_inherit_to_gap(l, l, left.frame,
page_rec_get_heap_no(left_next_rec),
PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page,
releasing waiting transactions */
lock_rec_reset_and_release_wait_low(&lock_sys.rec_hash, l,
PAGE_HEAP_NO_SUPREMUM);
}
lock_rec_free_all_from_discard_page(right_block);
/* Move the locks from the supremum of right page to the supremum
of the left page */
lock_rec_move(left, right, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM);
lock_sys.mutex_unlock();
/* there should exist no page lock on the right page,
otherwise, it will be blocked from merge */
ut_ad(!lock_sys.get_first_prdt_page(right));
lock_rec_free_all_from_discard_page(right);
}
/*************************************************************//**
......@@ -2915,9 +2830,9 @@ inherited from rec. */
void
lock_rec_reset_and_inherit_gap_locks(
/*=================================*/
const buf_block_t* heir_block, /*!< in: block containing the
const buf_block_t& heir_block, /*!< in: block containing the
record which inherits */
const buf_block_t* block, /*!< in: block containing the
const page_id_t donor, /*!< in: page containing the
record from which inherited;
does NOT reset the locks on
this record */
......@@ -2926,13 +2841,10 @@ lock_rec_reset_and_inherit_gap_locks(
ulint heap_no) /*!< in: heap_no of the
donating record */
{
lock_sys.mutex_lock();
lock_rec_reset_and_release_wait(heir_block, heir_heap_no);
lock_rec_inherit_to_gap(heir_block, block, heir_heap_no, heap_no);
lock_sys.mutex_unlock();
const page_id_t heir{heir_block.page.id()};
LockMutexGuard g;
lock_rec_reset_and_release_wait(heir, heir_heap_no);
lock_rec_inherit_to_gap(heir, donor, heir_block.frame, heir_heap_no, heap_no);
}
/*************************************************************//**
......@@ -2950,9 +2862,10 @@ lock_update_discard(
const page_t* page = block->frame;
const rec_t* rec;
ulint heap_no;
const page_id_t heir(heir_block->page.id());
const page_id_t page_id(block->page.id());
lock_sys.mutex_lock();
LockMutexGuard g;
if (lock_sys.get_first(page_id)) {
ut_ad(!lock_sys.get_first_prdt(page_id));
......@@ -2966,11 +2879,12 @@ lock_update_discard(
do {
heap_no = rec_get_heap_no_new(rec);
lock_rec_inherit_to_gap(heir_block, block,
lock_rec_inherit_to_gap(heir, page_id,
heir_block->frame,
heir_heap_no, heap_no);
lock_rec_reset_and_release_wait(
block, heap_no);
page_id, heap_no);
rec = page + rec_get_next_offs(rec, TRUE);
} while (heap_no != PAGE_HEAP_NO_SUPREMUM);
......@@ -2980,11 +2894,12 @@ lock_update_discard(
do {
heap_no = rec_get_heap_no_old(rec);
lock_rec_inherit_to_gap(heir_block, block,
lock_rec_inherit_to_gap(heir, page_id,
heir_block->frame,
heir_heap_no, heap_no);
lock_rec_reset_and_release_wait(
block, heap_no);
page_id, heap_no);
rec = page + rec_get_next_offs(rec, FALSE);
} while (heap_no != PAGE_HEAP_NO_SUPREMUM);
......@@ -2998,8 +2913,6 @@ lock_update_discard(
lock_rec_free_all_from_discard_page_low(
page_id, &lock_sys.prdt_page_hash);
}
lock_sys.mutex_unlock();
}
/*************************************************************//**
......@@ -3060,17 +2973,16 @@ lock_update_delete(
FALSE));
}
lock_sys.mutex_lock();
const page_id_t id{block->page.id()};
LockMutexGuard g;
/* Let the next record inherit the locks from rec, in gap mode */
lock_rec_inherit_to_gap(block, block, next_heap_no, heap_no);
lock_rec_inherit_to_gap(id, id, block->frame, next_heap_no, heap_no);
/* Reset the lock bits on rec and release waiting transactions */
lock_rec_reset_and_release_wait(block, heap_no);
lock_sys.mutex_unlock();
lock_rec_reset_and_release_wait(id, heap_no);
}
/*********************************************************************//**
......@@ -3090,39 +3002,27 @@ lock_rec_store_on_page_infimum(
bits are reset on the
record */
{
ulint heap_no = page_rec_get_heap_no(rec);
ut_ad(block->frame == page_align(rec));
const ulint heap_no= page_rec_get_heap_no(rec);
lock_sys.mutex_lock();
lock_rec_move(block, block, PAGE_HEAP_NO_INFIMUM, heap_no);
ut_ad(block->frame == page_align(rec));
const page_id_t id{block->page.id()};
lock_sys.mutex_unlock();
LockMutexGuard g;
lock_rec_move(*block, id, PAGE_HEAP_NO_INFIMUM, heap_no);
}
/*********************************************************************//**
Restores the state of explicit lock requests on a single record, where the
state was stored on the infimum of the page. */
void
lock_rec_restore_from_page_infimum(
/*===============================*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec, /*!< in: record whose lock state
is restored */
const buf_block_t* donator)/*!< in: page (rec is not
necessarily on this page)
whose infimum stored the lock
state; lock bits are reset on
the infimum */
/** Restore the explicit lock requests on a single record, where the
state was stored on the infimum of a page.
@param block buffer block containing rec
@param rec record whose lock state is restored
@param donator page (rec is not necessarily on this page)
whose infimum stored the lock state; lock bits are reset on the infimum */
void lock_rec_restore_from_page_infimum(const buf_block_t &block,
const rec_t *rec, page_id_t donator)
{
ulint heap_no = page_rec_get_heap_no(rec);
lock_sys.mutex_lock();
lock_rec_move(block, donator, heap_no, PAGE_HEAP_NO_INFIMUM);
lock_sys.mutex_unlock();
const ulint heap_no= page_rec_get_heap_no(rec);
LockMutexGuard g;
lock_rec_move(block, donator, heap_no, PAGE_HEAP_NO_INFIMUM);
}
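For orientation, the two infimum helpers above act as a pair: the lock
state of a record is parked on the page infimum before an operation that
invalidates the record's heap position, then moved back afterwards. A
hedged usage sketch follows; rebuild_rec() is a made-up placeholder, not a
function from this patch:

/* Park the locks on the infimum, rebuild the record, then restore the
locks onto the rebuilt record. Here the donator page is the same page,
hence block->page.id(). */
lock_rec_store_on_page_infimum(block, rec);
rec = rebuild_rec(block, rec);	/* hypothetical helper */
lock_rec_restore_from_page_infimum(*block, rec, block->page.id());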
/*========================= TABLE LOCKS ==============================*/
......@@ -3765,7 +3665,7 @@ lock_rec_unlock(
/*============*/
trx_t* trx, /*!< in/out: transaction that has
set a record lock */
const buf_block_t* block, /*!< in: buffer block containing rec */
const page_id_t id, /*!< in: page containing rec */
const rec_t* rec, /*!< in: record */
lock_mode lock_mode)/*!< in: LOCK_S or LOCK_X */
{
......@@ -3775,7 +3675,6 @@ lock_rec_unlock(
ut_ad(trx);
ut_ad(rec);
ut_ad(block->frame == page_align(rec));
ut_ad(!trx->lock.wait_lock);
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
ut_ad(!page_rec_is_metadata(rec));
......@@ -3784,7 +3683,7 @@ lock_rec_unlock(
lock_sys.mutex_lock();
first_lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
first_lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
/* Find the last lock with the same lock_mode and transaction
on the record. */
......@@ -4390,7 +4289,7 @@ lock_rec_queue_validate(
/*!< in: if the caller holds
both the lock mutex and
trx_sys_t->lock. */
const buf_block_t* block, /*!< in: buffer block containing rec */
const page_id_t id, /*!< in: page identifier */
const rec_t* rec, /*!< in: record to look at */
const dict_index_t* index, /*!< in: index, or NULL if not known */
const rec_offs* offsets)/*!< in: rec_get_offsets(rec, index) */
......@@ -4399,7 +4298,6 @@ lock_rec_queue_validate(
ulint heap_no;
ut_a(rec);
ut_a(block->frame == page_align(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));
ut_ad(page_rec_is_leaf(rec));
......@@ -4417,7 +4315,7 @@ lock_rec_queue_validate(
if (!page_rec_is_user_rec(rec)) {
for (lock = lock_rec_get_first(&lock_sys.rec_hash,
block, heap_no);
id, heap_no);
lock != NULL;
lock = lock_rec_get_next_const(heap_no, lock)) {
......@@ -4458,8 +4356,7 @@ lock_rec_queue_validate(
if (impl_trx->state == TRX_STATE_COMMITTED_IN_MEMORY) {
} else if (const lock_t* other_lock
= lock_rec_other_has_expl_req(
LOCK_S, block, true, heap_no,
impl_trx)) {
LOCK_S, id, true, heap_no, impl_trx)) {
/* The impl_trx is holding an implicit lock on the
given record 'rec'. So there cannot be another
explicit granted lock. Also, there can be another
......@@ -4491,7 +4388,7 @@ lock_rec_queue_validate(
wsrep_report_bf_lock_wait(other_lock->trx->mysql_thd, other_lock->trx->id);
if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no,
id, heap_no,
impl_trx)) {
ib::info() << "WSREP impl BF lock conflict";
}
......@@ -4500,14 +4397,14 @@ lock_rec_queue_validate(
{
ut_ad(other_lock->is_waiting());
ut_ad(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, impl_trx));
id, heap_no, impl_trx));
}
}
impl_trx->mutex_unlock();
}
for (lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
for (lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
lock != NULL;
lock = lock_rec_get_next_const(heap_no, lock)) {
......@@ -4527,8 +4424,7 @@ lock_rec_queue_validate(
const lock_t* other_lock
= lock_rec_other_has_expl_req(
mode, block, false, heap_no,
lock->trx);
mode, id, false, heap_no, lock->trx);
#ifdef WITH_WSREP
if (UNIV_UNLIKELY(other_lock && lock->trx->is_wsrep())) {
/* Only BF transaction may be granted
......@@ -4566,9 +4462,11 @@ static bool lock_rec_validate_page(const buf_block_t *block, bool latched)
rec_offs* offsets = offsets_;
rec_offs_init(offsets_);
lock_sys.mutex_lock();
const page_id_t id{block->page.id()};
LockMutexGuard g;
loop:
lock = lock_sys.get_first(block->page.id());
lock = lock_sys.get_first(id);
if (!lock) {
goto function_exit;
......@@ -4609,7 +4507,7 @@ static bool lock_rec_validate_page(const buf_block_t *block, bool latched)
cause a deadlock of threads. */
lock_rec_queue_validate(
TRUE, block, rec, lock->index, offsets);
true, id, rec, lock->index, offsets);
nth_bit = i + 1;
......@@ -4623,8 +4521,6 @@ static bool lock_rec_validate_page(const buf_block_t *block, bool latched)
goto loop;
function_exit:
lock_sys.mutex_unlock();
if (heap != NULL) {
mem_heap_free(heap);
}
......@@ -4803,6 +4699,8 @@ lock_rec_insert_check_and_lock(
ulint heap_no = page_rec_get_heap_no(next_rec);
ut_ad(!rec_is_metadata(next_rec, *index));
const page_id_t id{block->page.id()};
lock_sys.mutex_lock();
/* Because this code is invoked for a running transaction by
the thread that is serving the transaction, it is not necessary
......@@ -4813,7 +4711,7 @@ lock_rec_insert_check_and_lock(
BTR_NO_LOCKING_FLAG and skip the locking altogether. */
ut_ad(lock_table_has(trx, index->table, LOCK_IX));
lock = lock_rec_get_first(&lock_sys.rec_hash, block, heap_no);
lock = lock_rec_get_first(&lock_sys.rec_hash, id, heap_no);
if (lock == NULL) {
/* We optimize CPU time usage in the simplest case */
......@@ -4856,14 +4754,15 @@ lock_rec_insert_check_and_lock(
#ifdef WITH_WSREP
lock_t* c_lock =
#endif /* WITH_WSREP */
lock_rec_other_has_conflicting(type_mode, block, heap_no, trx)) {
lock_rec_other_has_conflicting(type_mode, id, heap_no, trx)) {
trx->mutex_lock();
err = lock_rec_enqueue_waiting(
#ifdef WITH_WSREP
c_lock,
#endif /* WITH_WSREP */
type_mode, block, heap_no, index, thr, NULL);
type_mode, id, block->frame, heap_no, index,
thr, nullptr);
trx->mutex_unlock();
} else {
......@@ -4899,8 +4798,7 @@ lock_rec_insert_check_and_lock(
offsets = rec_get_offsets(next_rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(
FALSE, block, next_rec, index, offsets));
ut_ad(lock_rec_queue_validate(false, id, next_rec, index, offsets));
if (heap != NULL) {
mem_heap_free(heap);
......@@ -4920,33 +4818,32 @@ static
void
lock_rec_convert_impl_to_expl_for_trx(
/*==================================*/
const buf_block_t* block, /*!< in: buffer block of rec */
const page_id_t id, /*!< in: page identifier */
const rec_t* rec, /*!< in: user record on page */
dict_index_t* index, /*!< in: index of record */
trx_t* trx, /*!< in/out: active transaction */
ulint heap_no)/*!< in: rec heap number to lock */
{
ut_ad(trx->is_referenced());
ut_ad(page_rec_is_leaf(rec));
ut_ad(!rec_is_metadata(rec, *index));
ut_ad(trx->is_referenced());
ut_ad(page_rec_is_leaf(rec));
ut_ad(!rec_is_metadata(rec, *index));
DEBUG_SYNC_C("before_lock_rec_convert_impl_to_expl_for_trx");
lock_sys.mutex_lock();
trx->mutex_lock();
ut_ad(!trx_state_eq(trx, TRX_STATE_NOT_STARTED));
DEBUG_SYNC_C("before_lock_rec_convert_impl_to_expl_for_trx");
{
LockMutexGuard g;
trx->mutex_lock();
ut_ad(!trx_state_eq(trx, TRX_STATE_NOT_STARTED));
if (!trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY)
&& !lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, trx)) {
lock_rec_add_to_queue(LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, index, trx, true);
}
if (!trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY) &&
!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, id, heap_no, trx))
lock_rec_add_to_queue(LOCK_X | LOCK_REC_NOT_GAP, id, page_align(rec),
heap_no, index, trx, true);
}
lock_sys.mutex_unlock();
trx->mutex_unlock();
trx->release_reference();
trx->mutex_unlock();
trx->release_reference();
DEBUG_SYNC_C("after_lock_rec_convert_impl_to_expl_for_trx");
DEBUG_SYNC_C("after_lock_rec_convert_impl_to_expl_for_trx");
}
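Note the scoping in the rewritten function above: the LockMutexGuard lives
in an inner block, so the lock_sys mutex is released before
trx->mutex_unlock() and trx->release_reference() run. The same latching
order in isolation (a sketch, assuming the guard wraps the lock_sys mutex
as outlined earlier):

{
	LockMutexGuard g;	/* lock_sys mutex held from here on */
	trx->mutex_lock();	/* trx->mutex nested inside it */
	/* ... work that needs both mutexes ... */
}				/* lock_sys mutex released at end of scope */
trx->mutex_unlock();		/* trx->mutex released afterwards */
trx->release_reference();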
......@@ -4954,8 +4851,8 @@ lock_rec_convert_impl_to_expl_for_trx(
struct lock_rec_other_trx_holds_expl_arg
{
const ulint heap_no;
const buf_block_t * const block;
const trx_t *impl_trx;
const page_id_t id;
const trx_t &impl_trx;
};
......@@ -4969,13 +4866,14 @@ static my_bool lock_rec_other_trx_holds_expl_callback(
element->trx->mutex_lock();
ut_ad(element->trx->state != TRX_STATE_NOT_STARTED);
lock_t *expl_lock= element->trx->state == TRX_STATE_COMMITTED_IN_MEMORY
? NULL : lock_rec_has_expl(LOCK_S | LOCK_REC_NOT_GAP, arg->block,
arg->heap_no, element->trx);
? nullptr
: lock_rec_has_expl(LOCK_S | LOCK_REC_NOT_GAP,
arg->id, arg->heap_no, element->trx);
/*
Assert that no transaction other than the one holding the implicit
lock holds an explicit lock on the record.
*/
ut_ad(!expl_lock || expl_lock->trx == arg->impl_trx);
ut_ad(!expl_lock || expl_lock->trx == &arg->impl_trx);
element->trx->mutex_unlock();
}
mysql_mutex_unlock(&element->mutex);
......@@ -4994,31 +4892,28 @@ static my_bool lock_rec_other_trx_holds_expl_callback(
@param caller_trx trx of current thread
@param[in] trx trx holding implicit lock on rec
@param[in] rec user record
@param[in] block buffer block containing the record
@param[in] id page identifier
*/
static void lock_rec_other_trx_holds_expl(trx_t *caller_trx, trx_t *trx,
const rec_t *rec,
const buf_block_t *block)
const page_id_t id)
{
if (trx)
{
ut_ad(!page_rec_is_metadata(rec));
lock_sys.mutex_lock();
LockMutexGuard g;
ut_ad(trx->is_referenced());
const trx_state_t state{trx->state};
ut_ad(state != TRX_STATE_NOT_STARTED);
if (state == TRX_STATE_COMMITTED_IN_MEMORY)
{
/* The transaction was committed before our lock_sys.mutex_lock(). */
lock_sys.mutex_unlock();
/* The transaction was committed before we acquired LockMutexGuard. */
return;
}
lock_rec_other_trx_holds_expl_arg arg= { page_rec_get_heap_no(rec), block,
trx };
lock_rec_other_trx_holds_expl_arg arg= { page_rec_get_heap_no(rec), id,
*trx };
trx_sys.rw_trx_hash.iterate(caller_trx,
lock_rec_other_trx_holds_expl_callback, &arg);
lock_sys.mutex_unlock();
}
}
#endif /* UNIV_DEBUG */
......@@ -5036,7 +4931,7 @@ an implicit exclusive lock on the record. In this case, no explicit lock
should be created.
@param[in,out] caller_trx current transaction
@param[in] block index tree leaf page
@param[in] id index tree leaf page identifier
@param[in] rec record on the leaf page
@param[in] index the index of the record
@param[in] offsets rec_get_offsets(rec,index)
......@@ -5045,7 +4940,7 @@ static
bool
lock_rec_convert_impl_to_expl(
trx_t* caller_trx,
const buf_block_t* block,
page_id_t id,
const rec_t* rec,
dict_index_t* index,
const rec_offs* offsets)
......@@ -5082,11 +4977,10 @@ lock_rec_convert_impl_to_expl(
return true;
}
ut_d(lock_rec_other_trx_holds_expl(caller_trx, trx, rec,
block));
ut_d(lock_rec_other_trx_holds_expl(caller_trx, trx, rec, id));
}
if (trx != 0) {
if (trx) {
ulint heap_no = page_rec_get_heap_no(rec);
ut_ad(trx->is_referenced());
......@@ -5096,7 +4990,7 @@ lock_rec_convert_impl_to_expl(
trx cannot be committed until the ref count is zero. */
lock_rec_convert_impl_to_expl_for_trx(
block, rec, index, trx, heap_no);
id, rec, index, trx, heap_no);
}
return false;
......@@ -5113,8 +5007,6 @@ lock queue.
dberr_t
lock_clust_rec_modify_check_and_lock(
/*=================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: record which should be
modified */
......@@ -5130,10 +5022,6 @@ lock_clust_rec_modify_check_and_lock(
ut_ad(dict_index_is_clust(index));
ut_ad(block->frame == page_align(rec));
if (flags & BTR_NO_LOCKING_FLAG) {
return(DB_SUCCESS);
}
ut_ad(!rec_is_metadata(rec, *index));
ut_ad(!index->table->is_temporary());
......@@ -5144,16 +5032,17 @@ lock_clust_rec_modify_check_and_lock(
/* If a transaction has no explicit x-lock set on the record, set one
for it */
if (lock_rec_convert_impl_to_expl(thr_get_trx(thr), block, rec, index,
offsets)) {
if (lock_rec_convert_impl_to_expl(thr_get_trx(thr), block->page.id(),
rec, index, offsets)) {
/* We already hold an implicit exclusive lock. */
return DB_SUCCESS;
}
err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
err = lock_rec_lock(true, LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, index, thr);
ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets));
ut_ad(lock_rec_queue_validate(false, block->page.id(),
rec, index, offsets));
if (err == DB_SUCCESS_LOCKED_REC) {
err = DB_SUCCESS;
......@@ -5205,7 +5094,7 @@ lock_sec_rec_modify_check_and_lock(
index record, and this would not have been possible if another active
transaction had modified this secondary index record. */
err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
err = lock_rec_lock(true, LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, index, thr);
#ifdef UNIV_DEBUG
......@@ -5219,7 +5108,7 @@ lock_sec_rec_modify_check_and_lock(
ULINT_UNDEFINED, &heap);
ut_ad(lock_rec_queue_validate(
FALSE, block, rec, index, offsets));
false, block->page.id(), rec, index, offsets));
if (heap != NULL) {
mem_heap_free(heap);
......@@ -5284,6 +5173,8 @@ lock_sec_rec_read_check_and_lock(
return(DB_SUCCESS);
}
const page_id_t id{block->page.id()};
ut_ad(!rec_is_metadata(rec, *index));
heap_no = page_rec_get_heap_no(rec);
......@@ -5293,16 +5184,16 @@ lock_sec_rec_read_check_and_lock(
if (!page_rec_is_supremum(rec)
&& page_get_max_trx_id(block->frame) >= trx_sys.get_min_trx_id()
&& lock_rec_convert_impl_to_expl(thr_get_trx(thr), block, rec,
&& lock_rec_convert_impl_to_expl(thr_get_trx(thr), id, rec,
index, offsets)) {
/* We already hold an implicit exclusive lock. */
return DB_SUCCESS;
}
err = lock_rec_lock(FALSE, gap_mode | mode,
err = lock_rec_lock(false, gap_mode | mode,
block, heap_no, index, thr);
ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets));
ut_ad(lock_rec_queue_validate(false, id, rec, index, offsets));
return(err);
}
......@@ -5355,19 +5246,21 @@ lock_clust_rec_read_check_and_lock(
return(DB_SUCCESS);
}
const page_id_t id{block->page.id()};
heap_no = page_rec_get_heap_no(rec);
if (heap_no != PAGE_HEAP_NO_SUPREMUM
&& lock_rec_convert_impl_to_expl(thr_get_trx(thr), block, rec,
&& lock_rec_convert_impl_to_expl(thr_get_trx(thr), id, rec,
index, offsets)) {
/* We already hold an implicit exclusive lock. */
return DB_SUCCESS;
}
err = lock_rec_lock(FALSE, gap_mode | mode,
err = lock_rec_lock(false, gap_mode | mode,
block, heap_no, index, thr);
ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets));
ut_ad(lock_rec_queue_validate(false, id, rec, index, offsets));
DEBUG_SYNC_C("after_lock_clust_rec_read_check_and_lock");
......@@ -5767,25 +5660,18 @@ lock_trx_has_sys_table_locks(
/** Check if the transaction holds an explicit exclusive lock on a record.
@param[in] trx transaction
@param[in] table table
@param[in] block leaf page
@param[in] id leaf page identifier
@param[in] heap_no heap number identifying the record
@return whether an explicit X-lock is held */
bool
lock_trx_has_expl_x_lock(
const trx_t* trx, /*!< in: transaction to check */
const dict_table_t* table, /*!< in: table to check */
const buf_block_t* block, /*!< in: buffer block of the record */
ulint heap_no)/*!< in: record heap number */
bool lock_trx_has_expl_x_lock(const trx_t &trx, const dict_table_t &table,
page_id_t id, ulint heap_no)
{
ut_ad(heap_no > PAGE_HEAP_NO_SUPREMUM);
lock_sys.mutex_lock();
ut_ad(lock_table_has(trx, table, LOCK_IX));
ut_ad(lock_table_has(trx, table, LOCK_X)
|| lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, block, heap_no,
trx));
lock_sys.mutex_unlock();
return(true);
ut_ad(heap_no > PAGE_HEAP_NO_SUPREMUM);
LockMutexGuard g;
ut_ad(lock_table_has(&trx, &table, LOCK_IX));
ut_ad(lock_table_has(&trx, &table, LOCK_X) ||
lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, id, heap_no, &trx));
return true;
}
#endif /* UNIV_DEBUG */
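Since lock_trx_has_expl_x_lock() only asserts and then returns true, its
callers wrap it in a debug assertion. With the new by-reference signature
the call shape becomes the following (a sketch mirroring the row0upd.cc
hunk near the end of this diff):

ut_ad(lock_trx_has_expl_x_lock(*trx, *index->table,
			       btr_pcur_get_block(pcur)->page.id(),
			       page_rec_get_heap_no(rec)));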
......@@ -6273,28 +6159,26 @@ lock_update_split_and_merge(
ut_ad(page_is_leaf(right_block->frame));
ut_ad(page_align(orig_pred) == left_block->frame);
lock_sys.mutex_lock();
const page_id_t l{left_block->page.id()};
const page_id_t r{right_block->page.id()};
LockMutexGuard g;
left_next_rec = page_rec_get_next_const(orig_pred);
ut_ad(!page_rec_is_metadata(left_next_rec));
/* Inherit the locks on the supremum of the left page to the
first record which was moved from the right page */
lock_rec_inherit_to_gap(
left_block, left_block,
page_rec_get_heap_no(left_next_rec),
PAGE_HEAP_NO_SUPREMUM);
lock_rec_inherit_to_gap(l, l, left_block->frame,
page_rec_get_heap_no(left_next_rec),
PAGE_HEAP_NO_SUPREMUM);
/* Reset the locks on the supremum of the left page,
releasing waiting transactions */
lock_rec_reset_and_release_wait(left_block,
PAGE_HEAP_NO_SUPREMUM);
lock_rec_reset_and_release_wait(l, PAGE_HEAP_NO_SUPREMUM);
/* Inherit the locks to the supremum of the left page from the
successor of the infimum on the right page */
lock_rec_inherit_to_gap(left_block, right_block,
lock_rec_inherit_to_gap(l, r, left_block->frame,
PAGE_HEAP_NO_SUPREMUM,
lock_get_min_heap_no(right_block));
lock_sys.mutex_unlock();
}
......@@ -229,8 +229,7 @@ lock_prdt_has_lock(
/*===============*/
ulint precise_mode, /*!< in: LOCK_S or LOCK_X */
unsigned type_mode, /*!< in: LOCK_PREDICATE etc. */
const buf_block_t* block, /*!< in: buffer block
containing the record */
const page_id_t id, /*!< in: page identifier */
lock_prdt_t* prdt, /*!< in: The predicate to be
attached to the new lock */
const trx_t* trx) /*!< in: transaction */
......@@ -243,7 +242,7 @@ lock_prdt_has_lock(
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
for (lock = lock_rec_get_first(
lock_hash_get(type_mode), block, PRDT_HEAPNO);
lock_hash_get(type_mode), id, PRDT_HEAPNO);
lock != NULL;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
ut_ad(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
......@@ -286,8 +285,7 @@ lock_prdt_other_has_conflicting(
unsigned mode, /*!< in: LOCK_S or LOCK_X,
possibly ORed to LOCK_PREDICATE or
LOCK_PRDT_PAGE, LOCK_INSERT_INTENTION */
const buf_block_t* block, /*!< in: buffer block containing
the record */
const page_id_t id, /*!< in: page identifier */
lock_prdt_t* prdt, /*!< in: Predicates (currently)
the Minimum Bounding Rectangle)
the new lock will be on */
......@@ -296,7 +294,7 @@ lock_prdt_other_has_conflicting(
lock_sys.mutex_assert_locked();
for (lock_t* lock = lock_rec_get_first(
lock_hash_get(mode), block, PRDT_HEAPNO);
lock_hash_get(mode), id, PRDT_HEAPNO);
lock != NULL;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
......@@ -509,6 +507,7 @@ lock_prdt_insert_check_and_lock(
ut_ad(index->is_spatial());
trx_t* trx = thr_get_trx(thr);
const page_id_t id{block->page.id()};
lock_sys.mutex_lock();
......@@ -521,7 +520,7 @@ lock_prdt_insert_check_and_lock(
lock_t* lock;
/* Only need to check locks on prdt_hash */
lock = lock_rec_get_first(&lock_sys.prdt_hash, block, PRDT_HEAPNO);
lock = lock_rec_get_first(&lock_sys.prdt_hash, id, PRDT_HEAPNO);
if (lock == NULL) {
lock_sys.mutex_unlock();
......@@ -547,7 +546,7 @@ lock_prdt_insert_check_and_lock(
const ulint mode = LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION;
const lock_t* wait_for = lock_prdt_other_has_conflicting(
mode, block, prdt, trx);
mode, id, prdt, trx);
if (wait_for != NULL) {
rtr_mbr_t* mbr = prdt_get_mbr_from_prdt(prdt);
......@@ -563,7 +562,7 @@ lock_prdt_insert_check_and_lock(
NULL, /* FIXME: replicate SPATIAL INDEX locks */
#endif
LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION,
block, PRDT_HEAPNO, index, thr, prdt);
id, block->frame, PRDT_HEAPNO, index, thr, prdt);
trx->mutex_unlock();
} else {
......@@ -762,6 +761,7 @@ lock_prdt_lock(
const hash_table_t& hash = type_mode == LOCK_PREDICATE
? lock_sys.prdt_hash
: lock_sys.prdt_page_hash;
const page_id_t id{block->page.id()};
/* Another transaction cannot have an implicit lock on the record,
because when we come here, we already have modified the clustered
......@@ -771,7 +771,7 @@ lock_prdt_lock(
lock_sys.mutex_lock();
const unsigned prdt_mode = type_mode | mode;
lock_t* lock = lock_sys.get_first(hash, block->page.id());
lock_t* lock = lock_sys.get_first(hash, id);
if (lock == NULL) {
lock = lock_rec_create(
......@@ -793,14 +793,14 @@ lock_prdt_lock(
trx->mutex_lock();
lock = lock_prdt_has_lock(
mode, type_mode, block, prdt, trx);
mode, type_mode, id, prdt, trx);
if (lock == NULL) {
lock_t* wait_for;
wait_for = lock_prdt_other_has_conflicting(
prdt_mode, block, prdt, trx);
prdt_mode, id, prdt, trx);
if (wait_for != NULL) {
......@@ -810,7 +810,7 @@ lock_prdt_lock(
SPATIAL INDEX locks */
#endif
prdt_mode,
block, PRDT_HEAPNO,
id, block->frame, PRDT_HEAPNO,
index, thr, prdt);
} else {
......@@ -921,8 +921,7 @@ lock_prdt_rec_move(
/*===============*/
const buf_block_t* receiver, /*!< in: buffer block containing
the receiving record */
const buf_block_t* donator) /*!< in: buffer block containing
the donating record */
const page_id_t donator) /*!< in: page containing
the donating record */
{
lock_sys.mutex_lock();
......@@ -948,19 +947,17 @@ lock_prdt_rec_move(
}
/** Removes predicate lock objects set on an index page which is being discarded.
@param[in] block page to be discarded
@param[in] id page to be discarded
@param[in] lock_hash lock hash */
void
lock_prdt_page_free_from_discard(
const buf_block_t* block,
hash_table_t* lock_hash)
lock_prdt_page_free_from_discard(const page_id_t id, hash_table_t *lock_hash)
{
lock_t* lock;
lock_t* next_lock;
lock_sys.mutex_assert_locked();
lock = lock_sys.get_first(*lock_hash, block->page.id());
lock = lock_sys.get_first(*lock_hash, id);
while (lock != NULL) {
next_lock = lock_rec_get_next_on_page(lock);
......
......@@ -1988,7 +1988,7 @@ row_unlock_for_mysql(
lock_rec_unlock(
trx,
btr_pcur_get_block(pcur),
btr_pcur_get_block(pcur)->page.id(),
rec,
static_cast<enum lock_mode>(
prebuilt->select_lock_type));
......@@ -1998,7 +1998,8 @@ row_unlock_for_mysql(
lock_rec_unlock(
trx,
btr_pcur_get_block(clust_pcur),
btr_pcur_get_block(clust_pcur)
->page.id(),
rec,
static_cast<enum lock_mode>(
prebuilt->select_lock_type));
......
......@@ -2840,7 +2840,7 @@ row_upd_clust_step(
if (!flags && !node->has_clust_rec_x_lock) {
err = lock_clust_rec_modify_check_and_lock(
0, btr_pcur_get_block(pcur),
btr_pcur_get_block(pcur),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
mtr.commit();
......@@ -2850,8 +2850,8 @@ row_upd_clust_step(
ut_ad(index->table->no_rollback() || index->table->is_temporary()
|| row_get_rec_trx_id(rec, index, offsets) == trx->id
|| lock_trx_has_expl_x_lock(trx, index->table,
btr_pcur_get_block(pcur),
|| lock_trx_has_expl_x_lock(*trx, *index->table,
btr_pcur_get_block(pcur)->page.id(),
page_rec_get_heap_no(rec)));
/* NOTE: the following function calls will also commit mtr */
......