Commit 2e64513f authored by Marko Mäkelä's avatar Marko Mäkelä

MDEV-20612 preparation: Fewer calls to buf_page_t::id()

parent b19ec884
......@@ -1873,6 +1873,8 @@ btr_root_raise_and_insert(
ut_a(!root_page_zip || page_zip_validate(root_page_zip, root->frame,
index));
#endif /* UNIV_ZIP_DEBUG */
const page_id_t root_id{root->page.id()};
#ifdef UNIV_BTR_DEBUG
if (!dict_index_is_ibuf(index)) {
ulint space = index->table->space_id;
......@@ -1883,7 +1885,7 @@ btr_root_raise_and_insert(
+ root->frame, space));
}
ut_a(dict_index_get_page(index) == root->page.id().page_no());
ut_a(dict_index_get_page(index) == root_id.page_no());
#endif /* UNIV_BTR_DEBUG */
ut_ad(mtr->memo_contains_flagged(&index->lock, MTR_MEMO_X_LOCK
| MTR_MEMO_SX_LOCK));
......@@ -1941,7 +1943,7 @@ btr_root_raise_and_insert(
/* Move any existing predicate locks */
if (dict_index_is_spatial(index)) {
lock_prdt_rec_move(new_block, root);
lock_prdt_rec_move(new_block, root_id);
} else {
btr_search_move_or_delete_hash_entries(
new_block, root);
......@@ -1986,7 +1988,7 @@ btr_root_raise_and_insert(
root page: we cannot discard the lock structs on the root page */
if (!dict_table_is_locking_disabled(index->table)) {
lock_update_root_raise(new_block, root);
lock_update_root_raise(*new_block, root_id);
}
/* Create a memory heap where the node pointer is stored */
......@@ -3342,7 +3344,7 @@ btr_lift_page_up(
/* Also update the predicate locks */
if (dict_index_is_spatial(index)) {
lock_prdt_rec_move(father_block, block);
lock_prdt_rec_move(father_block, block->page.id());
} else {
btr_search_move_or_delete_hash_entries(
father_block, block);
......@@ -3350,14 +3352,14 @@ btr_lift_page_up(
}
if (!dict_table_is_locking_disabled(index->table)) {
const page_id_t id{block->page.id()};
/* Free predicate page locks on the block */
if (dict_index_is_spatial(index)) {
lock_sys.mutex_lock();
if (index->is_spatial()) {
LockMutexGuard g;
lock_prdt_page_free_from_discard(
block, &lock_sys.prdt_page_hash);
lock_sys.mutex_unlock();
id, &lock_sys.prdt_page_hash);
}
lock_update_copy_and_discard(father_block, block);
lock_update_copy_and_discard(*father_block, id);
}
/* Go upward to root page, decrementing levels by one. */
......@@ -3576,6 +3578,8 @@ btr_compress(
/* Remove the page from the level list */
btr_level_list_remove(*block, *index, mtr);
const page_id_t id{block->page.id()};
if (dict_index_is_spatial(index)) {
rec_t* my_rec = father_cursor.page_cur.rec;
......@@ -3605,16 +3609,15 @@ btr_compress(
}
/* No GAP lock needs to be worrying about */
lock_sys.mutex_lock();
LockMutexGuard g;
lock_prdt_page_free_from_discard(
block, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(block);
lock_sys.mutex_unlock();
id, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(id);
} else {
btr_cur_node_ptr_delete(&father_cursor, mtr);
if (!dict_table_is_locking_disabled(index->table)) {
lock_update_merge_left(
merge_block, orig_pred, block);
*merge_block, orig_pred, id);
}
}
......@@ -3758,11 +3761,11 @@ btr_compress(
offsets2, offsets,
merge_page, mtr);
}
lock_sys.mutex_lock();
const page_id_t id{block->page.id()};
LockMutexGuard g;
lock_prdt_page_free_from_discard(
block, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(block);
lock_sys.mutex_unlock();
id, &lock_sys.prdt_page_hash);
lock_rec_free_all_from_discard_page(id);
} else {
compressed = btr_cur_pessimistic_delete(&err, TRUE,
......
......@@ -3311,15 +3311,15 @@ static void btr_cur_prefetch_siblings(const buf_block_t *block,
uint32_t prev= mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_PREV));
uint32_t next= mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_NEXT));
fil_space_t *space= index->table->space;
if (prev == FIL_NULL);
else if (index->table->space->acquire())
buf_read_page_background(index->table->space,
page_id_t(block->page.id().space(), prev),
else if (space->acquire())
buf_read_page_background(space, page_id_t(space->id, prev),
block->zip_size(), false);
if (next == FIL_NULL);
else if (index->table->space->acquire())
buf_read_page_background(index->table->space,
page_id_t(block->page.id().space(), next),
else if (space->acquire())
buf_read_page_background(space, page_id_t(space->id, next),
block->zip_size(), false);
}
......@@ -3859,7 +3859,7 @@ btr_cur_upd_lock_and_undo(
if (!(flags & BTR_NO_LOCKING_FLAG)) {
err = lock_clust_rec_modify_check_and_lock(
flags, btr_cur_get_block(cursor), rec, index,
btr_cur_get_block(cursor), rec, index,
offsets, thr);
if (err != DB_SUCCESS) {
return(err);
......@@ -4751,7 +4751,8 @@ btr_cur_optimistic_update(
btr_page_reorganize(page_cursor, index, mtr);
} else if (!dict_table_is_locking_disabled(index->table)) {
/* Restore the old explicit lock state on the record */
lock_rec_restore_from_page_infimum(block, rec, block);
lock_rec_restore_from_page_infimum(*block, rec,
block->page.id());
}
page_cur_move_to_next(page_cursor);
......@@ -4805,10 +4806,11 @@ btr_cur_pess_upd_restore_supremum(
const uint32_t prev_page_no = btr_page_get_prev(page);
const page_id_t page_id(block->page.id().space(), prev_page_no);
const page_id_t block_id{block->page.id()};
const page_id_t prev_id(block_id.space(), prev_page_no);
ut_ad(prev_page_no != FIL_NULL);
prev_block = buf_page_get_with_no_latch(page_id, block->zip_size(),
prev_block = buf_page_get_with_no_latch(prev_id, block->zip_size(),
mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(btr_page_get_next(prev_block->frame)
......@@ -4818,7 +4820,7 @@ btr_cur_pess_upd_restore_supremum(
/* We must already have an x-latch on prev_block! */
ut_ad(mtr->memo_contains_flagged(prev_block, MTR_MEMO_PAGE_X_FIX));
lock_rec_reset_and_inherit_gap_locks(prev_block, block,
lock_rec_reset_and_inherit_gap_locks(*prev_block, block_id,
PAGE_HEAP_NO_SUPREMUM,
page_rec_get_heap_no(rec));
}
......@@ -5106,7 +5108,8 @@ btr_cur_pessimistic_update(
}
} else if (!dict_table_is_locking_disabled(index->table)) {
lock_rec_restore_from_page_infimum(
btr_cur_get_block(cursor), rec, block);
*btr_cur_get_block(cursor), rec,
block->page.id());
}
if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))
......@@ -5251,7 +5254,7 @@ btr_cur_pessimistic_update(
rec = page_cursor->rec;
} else if (!dict_table_is_locking_disabled(index->table)) {
lock_rec_restore_from_page_infimum(
btr_cur_get_block(cursor), rec, block);
*btr_cur_get_block(cursor), rec, block->page.id());
}
/* If necessary, restore also the correct lock state for a new,
......@@ -5351,14 +5354,6 @@ btr_cur_del_mark_set_clust_rec(
return(DB_SUCCESS);
}
err = lock_clust_rec_modify_check_and_lock(BTR_NO_LOCKING_FLAG, block,
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
return(err);
}
err = trx_undo_report_row_operation(thr, index,
entry, NULL, 0, rec, offsets,
&roll_ptr);
......
......@@ -397,8 +397,8 @@ btr_defragment_merge_pages(
if (n_recs_to_move == n_recs) {
/* The whole page is merged with the previous page,
free it. */
lock_update_merge_left(to_block, orig_pred,
from_block);
const page_id_t from{from_block->page.id()};
lock_update_merge_left(*to_block, orig_pred, from);
btr_search_drop_page_hash_index(from_block);
btr_level_list_remove(*from_block, *index, mtr);
btr_page_get_father(index, from_block, mtr, &parent);
......
/*****************************************************************************
Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, 2020, MariaDB Corporation.
Copyright (c) 2018, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -769,7 +769,7 @@ rtr_split_page_move_rec_list(
ut_a(rec);
lock_rec_restore_from_page_infimum(
new_block, rec, block);
*new_block, rec, block->page.id());
page_cur_move_to_next(&new_page_cursor);
......
/*****************************************************************************
Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.
Copyright (c) 2017, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
......@@ -1164,7 +1164,7 @@ rtr_check_discard_page(
the root page */
buf_block_t* block) /*!< in: block of page to be discarded */
{
const ulint pageno = block->page.id().page_no();
const page_id_t id{block->page.id()};
mysql_mutex_lock(&index->rtr_track->rtr_active_mutex);
......@@ -1175,8 +1175,8 @@ rtr_check_discard_page(
mysql_mutex_lock(&rtr_info->rtr_path_mutex);
for (const node_visit_t& node : *rtr_info->path) {
if (node.page_no == pageno) {
rtr_rebuild_path(rtr_info, pageno);
if (node.page_no == id.page_no()) {
rtr_rebuild_path(rtr_info, node.page_no);
break;
}
}
......@@ -1185,8 +1185,7 @@ rtr_check_discard_page(
if (rtr_info->matches) {
mysql_mutex_lock(&rtr_info->matches->rtr_match_mutex);
if ((&rtr_info->matches->block)->page.id().page_no()
== pageno) {
if ((&rtr_info->matches->block)->page.id() == id) {
if (!rtr_info->matches->matched_recs->empty()) {
rtr_info->matches->matched_recs->clear();
}
......@@ -1200,10 +1199,9 @@ rtr_check_discard_page(
mysql_mutex_unlock(&index->rtr_track->rtr_active_mutex);
lock_sys.mutex_lock();
lock_prdt_page_free_from_discard(block, &lock_sys.prdt_hash);
lock_prdt_page_free_from_discard(block, &lock_sys.prdt_page_hash);
lock_sys.mutex_unlock();
LockMutexGuard g;
lock_prdt_page_free_from_discard(id, &lock_sys.prdt_hash);
lock_prdt_page_free_from_discard(id, &lock_sys.prdt_page_hash);
}
/** Structure acts as functor to get the optimistic access of the page.
......
......@@ -3790,7 +3790,8 @@ ibuf_insert_to_index_page(
&page_cur);
ut_ad(!cmp_dtuple_rec(entry, rec, offsets));
lock_rec_restore_from_page_infimum(block, rec, block);
lock_rec_restore_from_page_infimum(*block, rec,
block->page.id());
} else {
offsets = NULL;
ibuf_insert_to_index_page_low(entry, block, index,
......
......@@ -112,28 +112,17 @@ lock_update_merge_right(
const buf_block_t* left_block); /*!< in: merged index
page which will be
discarded */
/*************************************************************//**
Updates the lock table when the root page is copied to another in
btr_root_raise_and_insert. Note that we leave lock structs on the
/** Update locks when the root page is copied to another in
btr_root_raise_and_insert(). Note that we leave lock structs on the
root page, even though they do not make sense on other than leaf
pages: the reason is that in a pessimistic update the infimum record
of the root page will act as a dummy carrier of the locks of the record
to be updated. */
void
lock_update_root_raise(
/*===================*/
const buf_block_t* block, /*!< in: index page to which copied */
const buf_block_t* root); /*!< in: root page */
/*************************************************************//**
Updates the lock table when a page is copied to another and the original page
is removed from the chain of leaf pages, except if page is the root! */
void
lock_update_copy_and_discard(
/*=========================*/
const buf_block_t* new_block, /*!< in: index page to
which copied */
const buf_block_t* block); /*!< in: index page;
NOT the root! */
void lock_update_root_raise(const buf_block_t &block, const page_id_t root);
/** Update the lock table when a page is copied to another.
@param new_block the target page
@param old old page (not index root page) */
void lock_update_copy_and_discard(const buf_block_t &new_block, page_id_t old);
/*************************************************************//**
Updates the lock table when a page is split to the left. */
void
......@@ -141,18 +130,12 @@ lock_update_split_left(
/*===================*/
const buf_block_t* right_block, /*!< in: right page */
const buf_block_t* left_block); /*!< in: left page */
/*************************************************************//**
Updates the lock table when a page is merged to the left. */
void
lock_update_merge_left(
/*===================*/
const buf_block_t* left_block, /*!< in: left page to
which merged */
const rec_t* orig_pred, /*!< in: original predecessor
of supremum on the left page
before merge */
const buf_block_t* right_block); /*!< in: merged index page
which will be discarded */
/** Update the lock table when a page is merged to the left.
@param left left page
@param orig_pred original predecessor of supremum on the left page before merge
@param right merged, to-be-discarded right page */
void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred,
const page_id_t right);
/*************************************************************//**
Updates the lock table when a page is split and merged to
two pages. */
......@@ -169,9 +152,9 @@ inherited from rec. */
void
lock_rec_reset_and_inherit_gap_locks(
/*=================================*/
const buf_block_t* heir_block, /*!< in: block containing the
const buf_block_t& heir_block, /*!< in: block containing the
record which inherits */
const buf_block_t* block, /*!< in: block containing the
const page_id_t donor, /*!< in: page containing the
record from which inherited;
does NOT reset the locks on
this record */
......@@ -220,20 +203,14 @@ lock_rec_store_on_page_infimum(
record of the same page; lock
bits are reset on the
record */
/*********************************************************************//**
Restores the state of explicit lock requests on a single record, where the
state was stored on the infimum of the page. */
void
lock_rec_restore_from_page_infimum(
/*===============================*/
const buf_block_t* block, /*!< in: buffer block containing rec */
const rec_t* rec, /*!< in: record whose lock state
is restored */
const buf_block_t* donator);/*!< in: page (rec is not
necessarily on this page)
whose infimum stored the lock
state; lock bits are reset on
the infimum */
/** Restore the explicit lock requests on a single record, where the
state was stored on the infimum of a page.
@param block buffer block containing rec
@param rec record whose lock state is restored
@param donator page (rec is not necessarily on this page)
whose infimum stored the lock state; lock bits are reset on the infimum */
void lock_rec_restore_from_page_infimum(const buf_block_t &block,
const rec_t *rec, page_id_t donator);
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate insert of
a record. If they do, first tests if the query thread should anyway
......@@ -266,8 +243,6 @@ lock queue.
dberr_t
lock_clust_rec_modify_check_and_lock(
/*=================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
bit is set, does nothing */
const buf_block_t* block, /*!< in: buffer block of rec */
const rec_t* rec, /*!< in: record which should be
modified */
......@@ -423,7 +398,7 @@ lock_rec_unlock(
/*============*/
trx_t* trx, /*!< in/out: transaction that has
set a record lock */
const buf_block_t* block, /*!< in: buffer block containing rec */
const page_id_t id, /*!< in: page containing rec */
const rec_t* rec, /*!< in: record */
lock_mode lock_mode);/*!< in: LOCK_S or LOCK_X */
......@@ -589,16 +564,11 @@ lock_trx_has_sys_table_locks(
/** Check if the transaction holds an explicit exclusive lock on a record.
@param[in] trx transaction
@param[in] table table
@param[in] block leaf page
@param[in] id leaf page identifier
@param[in] heap_no heap number identifying the record
@return whether an explicit X-lock is held */
bool
lock_trx_has_expl_x_lock(
const trx_t* trx, /*!< in: transaction to check */
const dict_table_t* table, /*!< in: table to check */
const buf_block_t* block, /*!< in: buffer block of the record */
ulint heap_no)/*!< in: record heap number */
MY_ATTRIBUTE((nonnull, warn_unused_result));
bool lock_trx_has_expl_x_lock(const trx_t &trx, const dict_table_t &table,
page_id_t id, ulint heap_no);
#endif /* UNIV_DEBUG */
/** Lock operation struct */
......@@ -811,6 +781,7 @@ lock_rec_create_low(
dict_index_t* index,
trx_t* trx,
bool holds_trx_mutex);
/** Enqueue a waiting request for a lock which cannot be granted immediately.
Check for deadlocks.
@param[in] type_mode the requested lock mode (LOCK_S or LOCK_X)
......@@ -820,7 +791,8 @@ Check for deadlocks.
waiting lock request is set
when performing an insert of
an index record
@param[in] block leaf page in the index
@param[in] id page identifier
@param[in] page leaf page in the index
@param[in] heap_no record heap number in the block
@param[in] index index tree
@param[in,out] thr query thread
......@@ -833,7 +805,8 @@ lock_rec_enqueue_waiting(
lock_t* c_lock, /*!< conflicting lock */
#endif
unsigned type_mode,
const buf_block_t* block,
const page_id_t id,
const page_t* page,
ulint heap_no,
dict_index_t* index,
que_thr_t* thr,
......@@ -851,14 +824,10 @@ lock_rtr_move_rec_list(
moved */
ulint num_move); /*!< in: num of rec to move */
/*************************************************************//**
Removes record lock objects set on an index page which is discarded. This
/** Remove record locks for an index page which is discarded. This
function does not move locks, or check for waiting locks, therefore the
lock bitmaps must already be reset when this function is called. */
void
lock_rec_free_all_from_discard_page(
/*================================*/
const buf_block_t* block); /*!< in: page to be discarded */
void lock_rec_free_all_from_discard_page(const page_id_t page_id);
/** Cancel a waiting lock request and release possibly waiting transactions */
void lock_cancel_waiting_and_release(lock_t *lock);
......
......@@ -181,8 +181,7 @@ lock_prdt_rec_move(
/*===============*/
const buf_block_t* receiver, /*!< in: buffer block containing
the receiving record */
const buf_block_t* donator); /*!< in: buffer block containing
the donating record */
const page_id_t donator); /*!< in: target page */
/** Check whether there are R-tree Page lock on a page
@param[in] trx trx to test the lock
......@@ -191,12 +190,9 @@ lock_prdt_rec_move(
bool lock_test_prdt_page_lock(const trx_t *trx, const page_id_t page_id);
/** Removes predicate lock objects set on an index page which is discarded.
@param[in] block page to be discarded
@param[in] id page to be discarded
@param[in] lock_hash lock hash */
void
lock_prdt_page_free_from_discard(
/*=============================*/
const buf_block_t* block,
hash_table_t* lock_hash);
lock_prdt_page_free_from_discard(const page_id_t id, hash_table_t *lock_hash);
#endif
......@@ -527,16 +527,21 @@ lock_rec_get_next_const(
ulint heap_no,/*!< in: heap number of the record */
const lock_t* lock); /*!< in: lock */
/*********************************************************************//**
Gets the first explicit lock request on a record.
@return first lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_first(
/*===============*/
hash_table_t* hash, /*!< in: hash chain the lock on */
const buf_block_t* block, /*!< in: block containing the record */
ulint heap_no);/*!< in: heap number of the record */
/** Get the first explicit lock request on a record.
@param hash     lock hash table
@param id       page identifier
@param heap_no  record identifier in page
@return first lock
@retval nullptr if none exists */
inline lock_t*
lock_rec_get_first(hash_table_t *hash, const page_id_t id, ulint heap_no)
{
  /* Walk the per-page lock list starting at the hash bucket for id,
  stopping at the first lock whose bitmap covers heap_no. */
  lock_t *lock= lock_sys.get_first(*hash, id);
  while (lock && !lock_rec_get_nth_bit(lock, heap_no))
    lock= lock_rec_get_next_on_page(lock);
  return lock;
}
/*********************************************************************//**
Calculates if lock mode 1 is compatible with lock mode 2.
......
......@@ -136,25 +136,7 @@ lock_rec_get_next_const(
ulint heap_no,/*!< in: heap number of the record */
const lock_t* lock) /*!< in: lock */
{
return(lock_rec_get_next(heap_no, (lock_t*) lock));
}
/*********************************************************************//**
Gets the first explicit lock request on a record.
@return first lock, NULL if none exists */
UNIV_INLINE
lock_t*
lock_rec_get_first(
/*===============*/
hash_table_t* hash, /*!< in: hash chain the lock on */
const buf_block_t* block, /*!< in: block containing the record */
ulint heap_no)/*!< in: heap number of the record */
{
for (lock_t *lock= lock_sys.get_first(*hash, block->page.id());
lock; lock= lock_rec_get_next_on_page(lock))
if (lock_rec_get_nth_bit(lock, heap_no))
return lock;
return nullptr;
return lock_rec_get_next(heap_no, const_cast<lock_t*>(lock));
}
/*********************************************************************//**
......@@ -192,7 +174,7 @@ lock_rec_get_next_on_page_const(
{
ut_ad(!lock->is_table());
const page_id_t page_id(lock->un_member.rec_lock.page_id);
const page_id_t page_id{lock->un_member.rec_lock.page_id};
lock_sys.mutex_assert_locked();
while (!!(lock= static_cast<const lock_t*>(HASH_GET_NEXT(hash, lock))))
......
This diff is collapsed.
......@@ -229,8 +229,7 @@ lock_prdt_has_lock(
/*===============*/
ulint precise_mode, /*!< in: LOCK_S or LOCK_X */
unsigned type_mode, /*!< in: LOCK_PREDICATE etc. */
const buf_block_t* block, /*!< in: buffer block
containing the record */
const page_id_t id, /*!< in: page identifier */
lock_prdt_t* prdt, /*!< in: The predicate to be
attached to the new lock */
const trx_t* trx) /*!< in: transaction */
......@@ -243,7 +242,7 @@ lock_prdt_has_lock(
ut_ad(!(precise_mode & LOCK_INSERT_INTENTION));
for (lock = lock_rec_get_first(
lock_hash_get(type_mode), block, PRDT_HEAPNO);
lock_hash_get(type_mode), id, PRDT_HEAPNO);
lock != NULL;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
ut_ad(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
......@@ -286,8 +285,7 @@ lock_prdt_other_has_conflicting(
unsigned mode, /*!< in: LOCK_S or LOCK_X,
possibly ORed to LOCK_PREDICATE or
LOCK_PRDT_PAGE, LOCK_INSERT_INTENTION */
const buf_block_t* block, /*!< in: buffer block containing
the record */
const page_id_t id, /*!< in: page identifier */
lock_prdt_t* prdt, /*!< in: Predicates (currently)
the Minimum Bounding Rectangle)
the new lock will be on */
......@@ -296,7 +294,7 @@ lock_prdt_other_has_conflicting(
lock_sys.mutex_assert_locked();
for (lock_t* lock = lock_rec_get_first(
lock_hash_get(mode), block, PRDT_HEAPNO);
lock_hash_get(mode), id, PRDT_HEAPNO);
lock != NULL;
lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
......@@ -509,6 +507,7 @@ lock_prdt_insert_check_and_lock(
ut_ad(index->is_spatial());
trx_t* trx = thr_get_trx(thr);
const page_id_t id{block->page.id()};
lock_sys.mutex_lock();
......@@ -521,7 +520,7 @@ lock_prdt_insert_check_and_lock(
lock_t* lock;
/* Only need to check locks on prdt_hash */
lock = lock_rec_get_first(&lock_sys.prdt_hash, block, PRDT_HEAPNO);
lock = lock_rec_get_first(&lock_sys.prdt_hash, id, PRDT_HEAPNO);
if (lock == NULL) {
lock_sys.mutex_unlock();
......@@ -547,7 +546,7 @@ lock_prdt_insert_check_and_lock(
const ulint mode = LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION;
const lock_t* wait_for = lock_prdt_other_has_conflicting(
mode, block, prdt, trx);
mode, id, prdt, trx);
if (wait_for != NULL) {
rtr_mbr_t* mbr = prdt_get_mbr_from_prdt(prdt);
......@@ -563,7 +562,7 @@ lock_prdt_insert_check_and_lock(
NULL, /* FIXME: replicate SPATIAL INDEX locks */
#endif
LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION,
block, PRDT_HEAPNO, index, thr, prdt);
id, block->frame, PRDT_HEAPNO, index, thr, prdt);
trx->mutex_unlock();
} else {
......@@ -762,6 +761,7 @@ lock_prdt_lock(
const hash_table_t& hash = type_mode == LOCK_PREDICATE
? lock_sys.prdt_hash
: lock_sys.prdt_page_hash;
const page_id_t id{block->page.id()};
/* Another transaction cannot have an implicit lock on the record,
because when we come here, we already have modified the clustered
......@@ -771,7 +771,7 @@ lock_prdt_lock(
lock_sys.mutex_lock();
const unsigned prdt_mode = type_mode | mode;
lock_t* lock = lock_sys.get_first(hash, block->page.id());
lock_t* lock = lock_sys.get_first(hash, id);
if (lock == NULL) {
lock = lock_rec_create(
......@@ -793,14 +793,14 @@ lock_prdt_lock(
trx->mutex_lock();
lock = lock_prdt_has_lock(
mode, type_mode, block, prdt, trx);
mode, type_mode, id, prdt, trx);
if (lock == NULL) {
lock_t* wait_for;
wait_for = lock_prdt_other_has_conflicting(
prdt_mode, block, prdt, trx);
prdt_mode, id, prdt, trx);
if (wait_for != NULL) {
......@@ -810,7 +810,7 @@ lock_prdt_lock(
SPATIAL INDEX locks */
#endif
prdt_mode,
block, PRDT_HEAPNO,
id, block->frame, PRDT_HEAPNO,
index, thr, prdt);
} else {
......@@ -921,8 +921,7 @@ lock_prdt_rec_move(
/*===============*/
const buf_block_t* receiver, /*!< in: buffer block containing
the receiving record */
const buf_block_t* donator) /*!< in: buffer block containing
the donating record */
const page_id_t donator) /*!< in: target page */
{
lock_sys.mutex_lock();
......@@ -948,19 +947,17 @@ lock_prdt_rec_move(
}
/** Removes predicate lock objects set on an index page which is discarded.
@param[in] block page to be discarded
@param[in] id page to be discarded
@param[in] lock_hash lock hash */
void
lock_prdt_page_free_from_discard(
const buf_block_t* block,
hash_table_t* lock_hash)
lock_prdt_page_free_from_discard(const page_id_t id, hash_table_t *lock_hash)
{
lock_t* lock;
lock_t* next_lock;
lock_sys.mutex_assert_locked();
lock = lock_sys.get_first(*lock_hash, block->page.id());
lock = lock_sys.get_first(*lock_hash, id);
while (lock != NULL) {
next_lock = lock_rec_get_next_on_page(lock);
......
......@@ -1988,7 +1988,7 @@ row_unlock_for_mysql(
lock_rec_unlock(
trx,
btr_pcur_get_block(pcur),
btr_pcur_get_block(pcur)->page.id(),
rec,
static_cast<enum lock_mode>(
prebuilt->select_lock_type));
......@@ -1998,7 +1998,8 @@ row_unlock_for_mysql(
lock_rec_unlock(
trx,
btr_pcur_get_block(clust_pcur),
btr_pcur_get_block(clust_pcur)
->page.id(),
rec,
static_cast<enum lock_mode>(
prebuilt->select_lock_type));
......
......@@ -2840,7 +2840,7 @@ row_upd_clust_step(
if (!flags && !node->has_clust_rec_x_lock) {
err = lock_clust_rec_modify_check_and_lock(
0, btr_pcur_get_block(pcur),
btr_pcur_get_block(pcur),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
mtr.commit();
......@@ -2850,8 +2850,8 @@ row_upd_clust_step(
ut_ad(index->table->no_rollback() || index->table->is_temporary()
|| row_get_rec_trx_id(rec, index, offsets) == trx->id
|| lock_trx_has_expl_x_lock(trx, index->table,
btr_pcur_get_block(pcur),
|| lock_trx_has_expl_x_lock(*trx, *index->table,
btr_pcur_get_block(pcur)->page.id(),
page_rec_get_heap_no(rec)));
/* NOTE: the following function calls will also commit mtr */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment