Commit cdde97db authored by Vlad Lesin's avatar Vlad Lesin

MDEV-34466 XA prepare don't release unmodified records for some cases

There is no need to exclude exclusive non-gap locks from the procedure
of locks releasing on XA PREPARE execution in
lock_release_on_prepare_try() after commit
17e59ed3 (MDEV-33454), because
lock_rec_unlock_unmodified() should check if the record was modified
with the XA, and release the lock if it was not.

lock_release_on_prepare_try(): don't skip X-locks, let
lock_rec_unlock_unmodified() process them.

lock_sec_rec_some_has_impl(): add a template parameter for not acquiring
trx_t::mutex for the case when the caller already holds the mutex; don't
crash if the lock's bitmap is clean.

row_vers_impl_x_locked(), row_vers_impl_x_locked_low(): add new argument
to skip trx_t::mutex acquiring.

rw_trx_hash_t::validate_element(): don't acquire trx_t::mutex if the
current thread already holds it.

Thanks to Andrei Elkin for finding the bug.
Reviewed by Marko Mäkelä, Debarun Banerjee.
parent ce272f7c
CREATE TABLE t1 (a int, b int, c int,
INDEX i1(a),
INDEX i2(b))
ENGINE=InnoDB;
INSERT INTO t1 VALUES
(1,1,0), (1,2,0),
(2,1,0), (2,2,0);
SET @old_timeout= @@GLOBAL.innodb_lock_wait_timeout;
SET GLOBAL innodb_lock_wait_timeout= 1;
XA START "t1";
UPDATE t1 FORCE INDEX (i2) SET c=c+1 WHERE a=1 AND b=1;
XA END "t1";
XA PREPARE "t1";
connect con1,localhost,root,,;
SELECT * FROM t1 FORCE INDEX (i1) WHERE a=2 AND b=1 FOR UPDATE;
a b c
2 1 0
disconnect con1;
connection default;
XA COMMIT "t1";
DROP TABLE t1;
SET GLOBAL innodb_lock_wait_timeout= @old_timeout;
--source include/have_innodb.inc
--source include/count_sessions.inc
# MDEV-34466: XA PREPARE must release record X-locks that the transaction
# acquired but did not use to modify the clustered index.
CREATE TABLE t1 (a int, b int, c int,
INDEX i1(a),
INDEX i2(b))
ENGINE=InnoDB;
INSERT INTO t1 VALUES
(1,1,0), (1,2,0),
(2,1,0), (2,2,0);
# Shorten the lock wait timeout so a regression makes the SELECT below
# fail fast instead of hanging for the default wait period.
SET @old_timeout= @@GLOBAL.innodb_lock_wait_timeout;
SET GLOBAL innodb_lock_wait_timeout= 1;
XA START "t1";
# Force the i2 index so rows are located via the secondary index; only the
# (a=1, b=1) row is actually modified.
UPDATE t1 FORCE INDEX (i2) SET c=c+1 WHERE a=1 AND b=1;
XA END "t1";
XA PREPARE "t1";
--connect(con1,localhost,root,,)
# (pk, 2, 1, 0) record is X-locked but not modified in clustered index with the
# above XA transaction, if the bug is not fixed, the following SELECT will be
# blocked by the XA transaction if XA PREPARE does not release the unmodified
# record.
SELECT * FROM t1 FORCE INDEX (i1) WHERE a=2 AND b=1 FOR UPDATE;
--disconnect con1
--connection default
# Clean up: finish the prepared XA transaction and restore the setting.
XA COMMIT "t1";
DROP TABLE t1;
SET GLOBAL innodb_lock_wait_timeout= @old_timeout;
--source include/wait_until_count_sessions.inc
@@ -500,12 +500,13 @@ class rw_trx_hash_t
     ut_ad(!trx->read_only || !trx->rsegs.m_redo.rseg);
     ut_ad(!trx->is_autocommit_non_locking());
     /* trx->state can be anything except TRX_STATE_NOT_STARTED */
-    ut_d(trx->mutex_lock());
+    ut_d(bool acquire_trx_mutex = !trx->mutex_is_owner());
+    ut_d(if (acquire_trx_mutex) trx->mutex_lock());
     ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE) ||
           trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY) ||
           trx_state_eq(trx, TRX_STATE_PREPARED_RECOVERED) ||
           trx_state_eq(trx, TRX_STATE_PREPARED));
-    ut_d(trx->mutex_unlock());
+    ut_d(if (acquire_trx_mutex) trx->mutex_unlock());
   }
...
@@ -4518,7 +4518,9 @@ static void lock_rec_unlock_unmodified(hash_cell_t &cell, lock_t *lock,
 {
   if (UNIV_UNLIKELY(!page_is_leaf(block->page.frame)))
   {
-    ut_ad("corrupted lock system" == 0);
+    /* There can be locks with clean bitmap on non-leaf pages in the case of
+    page split. See lock_move_rec_list_end() call stack for details. */
+    ut_ad(lock_rec_find_set_bit(lock) == ULINT_UNDEFINED);
     goto func_exit;
   }
@@ -4540,8 +4542,15 @@ static void lock_rec_unlock_unmodified(hash_cell_t &cell, lock_t *lock,
     {
       offsets= rec_get_offsets(rec, index, offsets, index->n_core_fields,
                                ULINT_UNDEFINED, &heap);
-      if (lock->trx !=
-          lock_sec_rec_some_has_impl(lock->trx, rec, index, offsets))
+      trx_t *impl_trx=
+        lock_sec_rec_some_has_impl(lock->trx, rec, index, offsets);
+      /* row_vers_impl_x_locked_low() references trx in both cases, i.e.
+      when caller trx is equal to trx, which modified the record, it
+      references the trx directly, otherwise it invokes trx_sys.find(),
+      which also references trx, so the trx must be released anyway. */
+      if (impl_trx)
+        impl_trx->release_reference();
+      if (lock->trx != impl_trx)
         goto unlock_rec;
     }
   }
@@ -4591,8 +4600,6 @@ static bool lock_release_on_prepare_try(trx_t *trx)
       bool supremum_bit= lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM);
       bool rec_granted_exclusive_not_gap=
         lock->is_rec_granted_exclusive_not_gap();
-      if (!supremum_bit && rec_granted_exclusive_not_gap)
-        continue;
       if (UNIV_UNLIKELY(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)))
         continue; /* SPATIAL INDEX locking is broken. */
       auto cell=
...
@@ -196,11 +196,13 @@ row_vers_impl_x_locked_low(
 			version, clust_index, clust_offsets,
 			heap, &prev_version, mtr, 0, NULL,
 			dict_index_has_virtual(index) ? &vrow : NULL);
-		ut_d(trx->mutex_lock());
+		ut_d(bool owns_trx_mutex = trx->mutex_is_owner());
+		ut_d(if (!owns_trx_mutex)
+			trx->mutex_lock();)
 		const bool committed = trx_state_eq(
 			trx, TRX_STATE_COMMITTED_IN_MEMORY);
-		ut_d(trx->mutex_unlock());
+		ut_d(if (!owns_trx_mutex)
+			trx->mutex_unlock();)

 		/* The oldest visible clustered index version must not be
 		delete-marked, because we never start a transaction by
@@ -400,7 +402,8 @@ row_vers_impl_x_locked(
 	const rec_t*	clust_rec;
 	dict_index_t*	clust_index;

-	lock_sys.assert_unlocked();
+	/* The current function can be called from lock_rec_unlock_unmodified()
+	under lock_sys.wr_lock() */

 	mtr_start(&mtr);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment