Commit 83bd4dd1 authored by Marko Mäkelä

MDEV-15914: Remove trx_t::undo_mutex

The trx_t::undo_mutex covered both some main-memory data structures
(trx_undo_t) and access to undo pages. The trx_undo_t is only
accessed by the thread that is associated with a running transaction.
Likewise, each transaction has its private set of undo pages.
The thread that is associated with an active transaction may
lock multiple undo pages concurrently, but no other thread may
lock multiple pages of a foreign transaction.

Concurrent access to the undo logs of an active transaction is possible,
but trx_undo_get_undo_rec_low() only locks one undo page at a time,
without ever holding any undo_mutex.

It seems that the trx_t::undo_mutex would have been necessary if
multi-threaded execution or rollback of a single transaction
had been implemented in InnoDB.
parent f7cac5e2
...@@ -602,7 +602,6 @@ static PSI_mutex_info all_innodb_mutexes[] = { ...@@ -602,7 +602,6 @@ static PSI_mutex_info all_innodb_mutexes[] = {
PSI_KEY(srv_misc_tmpfile_mutex), PSI_KEY(srv_misc_tmpfile_mutex),
PSI_KEY(srv_monitor_file_mutex), PSI_KEY(srv_monitor_file_mutex),
PSI_KEY(buf_dblwr_mutex), PSI_KEY(buf_dblwr_mutex),
PSI_KEY(trx_undo_mutex),
PSI_KEY(trx_pool_mutex), PSI_KEY(trx_pool_mutex),
PSI_KEY(trx_pool_manager_mutex), PSI_KEY(trx_pool_manager_mutex),
PSI_KEY(srv_sys_mutex), PSI_KEY(srv_sys_mutex),
......
...@@ -94,7 +94,6 @@ extern mysql_pfs_key_t srv_innodb_monitor_mutex_key; ...@@ -94,7 +94,6 @@ extern mysql_pfs_key_t srv_innodb_monitor_mutex_key;
extern mysql_pfs_key_t srv_misc_tmpfile_mutex_key; extern mysql_pfs_key_t srv_misc_tmpfile_mutex_key;
extern mysql_pfs_key_t srv_monitor_file_mutex_key; extern mysql_pfs_key_t srv_monitor_file_mutex_key;
extern mysql_pfs_key_t buf_dblwr_mutex_key; extern mysql_pfs_key_t buf_dblwr_mutex_key;
extern mysql_pfs_key_t trx_undo_mutex_key;
extern mysql_pfs_key_t trx_mutex_key; extern mysql_pfs_key_t trx_mutex_key;
extern mysql_pfs_key_t trx_pool_mutex_key; extern mysql_pfs_key_t trx_pool_mutex_key;
extern mysql_pfs_key_t trx_pool_manager_mutex_key; extern mysql_pfs_key_t trx_pool_manager_mutex_key;
......
...@@ -108,16 +108,6 @@ V ...@@ -108,16 +108,6 @@ V
Transaction system header Transaction system header
| |
V V
Transaction undo mutex The undo log entry must be written
| before any index page is modified.
| Transaction undo mutex is for the undo
| logs the analogue of the tree latch
| for a B-tree. If a thread has the
| trx undo mutex reserved, it is allowed
| to latch the undo log pages in any
| order, and also after it has acquired
| the fsp latch.
V
Rollback segment mutex The rollback segment mutex must be Rollback segment mutex The rollback segment mutex must be
| reserved, if, e.g., a new page must | reserved, if, e.g., a new page must
| be added to an undo log. The rollback | be added to an undo log. The rollback
...@@ -256,7 +246,6 @@ enum latch_level_t { ...@@ -256,7 +246,6 @@ enum latch_level_t {
SYNC_RSEG_HEADER_NEW, SYNC_RSEG_HEADER_NEW,
SYNC_NOREDO_RSEG, SYNC_NOREDO_RSEG,
SYNC_REDO_RSEG, SYNC_REDO_RSEG,
SYNC_TRX_UNDO,
SYNC_PURGE_LATCH, SYNC_PURGE_LATCH,
SYNC_TREE_NODE, SYNC_TREE_NODE,
SYNC_TREE_NODE_FROM_HASH, SYNC_TREE_NODE_FROM_HASH,
...@@ -338,7 +327,6 @@ enum latch_id_t { ...@@ -338,7 +327,6 @@ enum latch_id_t {
LATCH_ID_SRV_MISC_TMPFILE, LATCH_ID_SRV_MISC_TMPFILE,
LATCH_ID_SRV_MONITOR_FILE, LATCH_ID_SRV_MONITOR_FILE,
LATCH_ID_BUF_DBLWR, LATCH_ID_BUF_DBLWR,
LATCH_ID_TRX_UNDO,
LATCH_ID_TRX_POOL, LATCH_ID_TRX_POOL,
LATCH_ID_TRX_POOL_MANAGER, LATCH_ID_TRX_POOL_MANAGER,
LATCH_ID_TRX, LATCH_ID_TRX,
......
...@@ -978,12 +978,6 @@ struct trx_t { ...@@ -978,12 +978,6 @@ struct trx_t {
trx_savepoints; /*!< savepoints set with SAVEPOINT ..., trx_savepoints; /*!< savepoints set with SAVEPOINT ...,
oldest first */ oldest first */
/*------------------------------*/ /*------------------------------*/
UndoMutex undo_mutex; /*!< mutex protecting the fields in this
section (down to undo_no_arr), EXCEPT
last_sql_stat_start, which can be
accessed only when we know that there
cannot be any activity in the undo
logs! */
undo_no_t undo_no; /*!< next undo log record number to undo_no_t undo_no; /*!< next undo log record number to
assign; since the undo log is assign; since the undo log is
private for a transaction, this private for a transaction, this
...@@ -994,8 +988,7 @@ struct trx_t { ...@@ -994,8 +988,7 @@ struct trx_t {
trx_savept_t last_sql_stat_start; trx_savept_t last_sql_stat_start;
/*!< undo_no when the last sql statement /*!< undo_no when the last sql statement
was started: in case of an error, trx was started: in case of an error, trx
is rolled back down to this undo is rolled back down to this number */
number; see note at undo_mutex! */
trx_rsegs_t rsegs; /* rollback segments for undo logging */ trx_rsegs_t rsegs; /* rollback segments for undo logging */
undo_no_t roll_limit; /*!< least undo number to undo during undo_no_t roll_limit; /*!< least undo number to undo during
a partial rollback; 0 otherwise */ a partial rollback; 0 otherwise */
......
...@@ -138,7 +138,6 @@ typedef byte trx_undo_rec_t; ...@@ -138,7 +138,6 @@ typedef byte trx_undo_rec_t;
typedef ib_mutex_t RsegMutex; typedef ib_mutex_t RsegMutex;
typedef ib_mutex_t TrxMutex; typedef ib_mutex_t TrxMutex;
typedef ib_mutex_t UndoMutex;
typedef ib_mutex_t PQMutex; typedef ib_mutex_t PQMutex;
typedef ib_mutex_t TrxSysMutex; typedef ib_mutex_t TrxSysMutex;
......
...@@ -343,8 +343,8 @@ trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no, ...@@ -343,8 +343,8 @@ trx_undo_mem_create_at_db_start(trx_rseg_t* rseg, ulint id, ulint page_no,
#ifndef UNIV_INNOCHECKSUM #ifndef UNIV_INNOCHECKSUM
/** Transaction undo log memory object; this is protected by the undo_mutex /** Transaction undo log memory object; modified by the thread associated
in the corresponding transaction object */ with the transaction. */
struct trx_undo_t { struct trx_undo_t {
/*-----------------------------*/ /*-----------------------------*/
......
...@@ -1005,11 +1005,6 @@ que_thr_step( ...@@ -1005,11 +1005,6 @@ que_thr_step(
} else if (type == QUE_NODE_FOR) { } else if (type == QUE_NODE_FOR) {
for_step(thr); for_step(thr);
} else if (type == QUE_NODE_PROC) { } else if (type == QUE_NODE_PROC) {
/* We can access trx->undo_no without reserving
trx->undo_mutex, because there cannot be active query
threads doing updating or inserting at the moment! */
if (thr->prev_node == que_node_get_parent(node)) { if (thr->prev_node == que_node_get_parent(node)) {
trx->last_sql_stat_start.least_undo_no trx->last_sql_stat_start.least_undo_no
= trx->undo_no; = trx->undo_no;
......
...@@ -3752,8 +3752,6 @@ row_import_for_mysql( ...@@ -3752,8 +3752,6 @@ row_import_for_mysql(
/* Assign an undo segment for the transaction, so that the /* Assign an undo segment for the transaction, so that the
transaction will be recovered after a crash. */ transaction will be recovered after a crash. */
mutex_enter(&trx->undo_mutex);
/* TODO: Do not write any undo log for the IMPORT cleanup. */ /* TODO: Do not write any undo log for the IMPORT cleanup. */
{ {
mtr_t mtr; mtr_t mtr;
...@@ -3762,8 +3760,6 @@ row_import_for_mysql( ...@@ -3762,8 +3760,6 @@ row_import_for_mysql(
mtr.commit(); mtr.commit();
} }
mutex_exit(&trx->undo_mutex);
DBUG_EXECUTE_IF("ib_import_undo_assign_failure", DBUG_EXECUTE_IF("ib_import_undo_assign_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;); err = DB_TOO_MANY_CONCURRENT_TRXS;);
......
...@@ -1804,12 +1804,10 @@ row_truncate_table_for_mysql( ...@@ -1804,12 +1804,10 @@ row_truncate_table_for_mysql(
/* Step-6: Truncate operation can be rolled back in case of error /* Step-6: Truncate operation can be rolled back in case of error
till some point. Associate rollback segment to record undo log. */ till some point. Associate rollback segment to record undo log. */
if (!table->is_temporary()) { if (!table->is_temporary()) {
mutex_enter(&trx->undo_mutex);
mtr_t mtr; mtr_t mtr;
mtr.start(); mtr.start();
trx_undo_assign(trx, &err, &mtr); trx_undo_assign(trx, &err, &mtr);
mtr.commit(); mtr.commit();
mutex_exit(&trx->undo_mutex);
DBUG_EXECUTE_IF("ib_err_trunc_assigning_undo_log", DBUG_EXECUTE_IF("ib_err_trunc_assigning_undo_log",
err = DB_ERROR;); err = DB_ERROR;);
......
...@@ -499,7 +499,6 @@ LatchDebug::LatchDebug() ...@@ -499,7 +499,6 @@ LatchDebug::LatchDebug()
LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW); LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW);
LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG); LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG);
LEVEL_MAP_INSERT(SYNC_REDO_RSEG); LEVEL_MAP_INSERT(SYNC_REDO_RSEG);
LEVEL_MAP_INSERT(SYNC_TRX_UNDO);
LEVEL_MAP_INSERT(SYNC_PURGE_LATCH); LEVEL_MAP_INSERT(SYNC_PURGE_LATCH);
LEVEL_MAP_INSERT(SYNC_TREE_NODE); LEVEL_MAP_INSERT(SYNC_TREE_NODE);
LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH); LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH);
...@@ -767,7 +766,6 @@ LatchDebug::check_order( ...@@ -767,7 +766,6 @@ LatchDebug::check_order(
case SYNC_IBUF_BITMAP_MUTEX: case SYNC_IBUF_BITMAP_MUTEX:
case SYNC_REDO_RSEG: case SYNC_REDO_RSEG:
case SYNC_NOREDO_RSEG: case SYNC_NOREDO_RSEG:
case SYNC_TRX_UNDO:
case SYNC_PURGE_LATCH: case SYNC_PURGE_LATCH:
case SYNC_PURGE_QUEUE: case SYNC_PURGE_QUEUE:
case SYNC_DICT_AUTOINC_MUTEX: case SYNC_DICT_AUTOINC_MUTEX:
...@@ -894,8 +892,7 @@ LatchDebug::check_order( ...@@ -894,8 +892,7 @@ LatchDebug::check_order(
The purge thread can read the UNDO pages without any covering The purge thread can read the UNDO pages without any covering
mutex. */ mutex. */
ut_a(find(latches, SYNC_TRX_UNDO) != 0 ut_a(find(latches, SYNC_REDO_RSEG) != 0
|| find(latches, SYNC_REDO_RSEG) != 0
|| find(latches, SYNC_NOREDO_RSEG) != 0 || find(latches, SYNC_NOREDO_RSEG) != 0
|| basic_check(latches, level, level - 1)); || basic_check(latches, level, level - 1));
break; break;
...@@ -1400,8 +1397,6 @@ sync_latch_meta_init() ...@@ -1400,8 +1397,6 @@ sync_latch_meta_init()
LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key); LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key);
LATCH_ADD_MUTEX(TRX_UNDO, SYNC_TRX_UNDO, trx_undo_mutex_key);
LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key); LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key);
LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER, LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER,
......
...@@ -80,7 +80,6 @@ mysql_pfs_key_t srv_innodb_monitor_mutex_key; ...@@ -80,7 +80,6 @@ mysql_pfs_key_t srv_innodb_monitor_mutex_key;
mysql_pfs_key_t srv_misc_tmpfile_mutex_key; mysql_pfs_key_t srv_misc_tmpfile_mutex_key;
mysql_pfs_key_t srv_monitor_file_mutex_key; mysql_pfs_key_t srv_monitor_file_mutex_key;
mysql_pfs_key_t buf_dblwr_mutex_key; mysql_pfs_key_t buf_dblwr_mutex_key;
mysql_pfs_key_t trx_undo_mutex_key;
mysql_pfs_key_t trx_mutex_key; mysql_pfs_key_t trx_mutex_key;
mysql_pfs_key_t trx_pool_mutex_key; mysql_pfs_key_t trx_pool_mutex_key;
mysql_pfs_key_t trx_pool_manager_mutex_key; mysql_pfs_key_t trx_pool_manager_mutex_key;
......
...@@ -1904,7 +1904,6 @@ trx_undo_report_rename(trx_t* trx, const dict_table_t* table) ...@@ -1904,7 +1904,6 @@ trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
mtr_t mtr; mtr_t mtr;
dberr_t err; dberr_t err;
mtr.start(); mtr.start();
mutex_enter(&trx->undo_mutex);
if (buf_block_t* block = trx_undo_assign(trx, &err, &mtr)) { if (buf_block_t* block = trx_undo_assign(trx, &err, &mtr)) {
trx_undo_t* undo = trx->rsegs.m_redo.undo; trx_undo_t* undo = trx->rsegs.m_redo.undo;
ut_ad(err == DB_SUCCESS); ut_ad(err == DB_SUCCESS);
...@@ -1938,7 +1937,6 @@ trx_undo_report_rename(trx_t* trx, const dict_table_t* table) ...@@ -1938,7 +1937,6 @@ trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
mtr.commit(); mtr.commit();
} }
mutex_exit(&trx->undo_mutex);
return err; return err;
} }
...@@ -2005,7 +2003,6 @@ trx_undo_report_row_operation( ...@@ -2005,7 +2003,6 @@ trx_undo_report_row_operation(
rseg = trx->rsegs.m_redo.rseg; rseg = trx->rsegs.m_redo.rseg;
} }
mutex_enter(&trx->undo_mutex);
dberr_t err; dberr_t err;
buf_block_t* undo_block = trx_undo_assign_low(trx, rseg, pundo, buf_block_t* undo_block = trx_undo_assign_low(trx, rseg, pundo,
&err, &mtr); &err, &mtr);
...@@ -2068,8 +2065,6 @@ trx_undo_report_row_operation( ...@@ -2068,8 +2065,6 @@ trx_undo_report_row_operation(
undo->guess_block = undo_block; undo->guess_block = undo_block;
ut_ad(!undo->empty()); ut_ad(!undo->empty());
mutex_exit(&trx->undo_mutex);
if (!is_temp) { if (!is_temp) {
const undo_no_t limit = undo->top_undo_no; const undo_no_t limit = undo->top_undo_no;
/* Determine if this is the first time /* Determine if this is the first time
...@@ -2127,7 +2122,6 @@ trx_undo_report_row_operation( ...@@ -2127,7 +2122,6 @@ trx_undo_report_row_operation(
err = DB_OUT_OF_FILE_SPACE; err = DB_OUT_OF_FILE_SPACE;
err_exit: err_exit:
mutex_exit(&trx->undo_mutex);
mtr_commit(&mtr); mtr_commit(&mtr);
return(err); return(err);
} }
......
...@@ -895,8 +895,6 @@ static ...@@ -895,8 +895,6 @@ static
void void
trx_roll_try_truncate(trx_t* trx) trx_roll_try_truncate(trx_t* trx)
{ {
ut_ad(mutex_own(&trx->undo_mutex));
trx->pages_undone = 0; trx->pages_undone = 0;
undo_no_t undo_no = trx->undo_no; undo_no_t undo_no = trx->undo_no;
...@@ -934,8 +932,6 @@ trx_roll_pop_top_rec( ...@@ -934,8 +932,6 @@ trx_roll_pop_top_rec(
trx_undo_t* undo, /*!< in: undo log */ trx_undo_t* undo, /*!< in: undo log */
mtr_t* mtr) /*!< in: mtr */ mtr_t* mtr) /*!< in: mtr */
{ {
ut_ad(mutex_own(&trx->undo_mutex));
page_t* undo_page = trx_undo_page_get_s_latched( page_t* undo_page = trx_undo_page_get_s_latched(
page_id_t(undo->rseg->space->id, undo->top_page_no), mtr); page_id_t(undo->rseg->space->id, undo->top_page_no), mtr);
...@@ -974,8 +970,6 @@ trx_roll_pop_top_rec( ...@@ -974,8 +970,6 @@ trx_roll_pop_top_rec(
trx_undo_rec_t* trx_undo_rec_t*
trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap) trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
{ {
mutex_enter(&trx->undo_mutex);
if (trx->pages_undone >= TRX_ROLL_TRUNC_THRESHOLD) { if (trx->pages_undone >= TRX_ROLL_TRUNC_THRESHOLD) {
trx_roll_try_truncate(trx); trx_roll_try_truncate(trx);
} }
...@@ -1021,7 +1015,6 @@ trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap) ...@@ -1021,7 +1015,6 @@ trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
later, we will default to a full ROLLBACK. */ later, we will default to a full ROLLBACK. */
trx->roll_limit = 0; trx->roll_limit = 0;
trx->in_rollback = false; trx->in_rollback = false;
mutex_exit(&trx->undo_mutex);
return(NULL); return(NULL);
} }
...@@ -1058,7 +1051,6 @@ trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap) ...@@ -1058,7 +1051,6 @@ trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
} }
trx->undo_no = undo_no; trx->undo_no = undo_no;
mutex_exit(&trx->undo_mutex);
trx_undo_rec_t* undo_rec_copy = trx_undo_rec_copy(undo_rec, heap); trx_undo_rec_t* undo_rec_copy = trx_undo_rec_copy(undo_rec, heap);
mtr.commit(); mtr.commit();
......
...@@ -225,7 +225,6 @@ struct TrxFactory { ...@@ -225,7 +225,6 @@ struct TrxFactory {
&trx_named_savept_t::trx_savepoints); &trx_named_savept_t::trx_savepoints);
mutex_create(LATCH_ID_TRX, &trx->mutex); mutex_create(LATCH_ID_TRX, &trx->mutex);
mutex_create(LATCH_ID_TRX_UNDO, &trx->undo_mutex);
lock_trx_alloc_locks(trx); lock_trx_alloc_locks(trx);
} }
...@@ -252,7 +251,6 @@ struct TrxFactory { ...@@ -252,7 +251,6 @@ struct TrxFactory {
ut_free(trx->detailed_error); ut_free(trx->detailed_error);
mutex_free(&trx->mutex); mutex_free(&trx->mutex);
mutex_free(&trx->undo_mutex);
trx->mod_tables.~trx_mod_tables_t(); trx->mod_tables.~trx_mod_tables_t();
...@@ -493,11 +491,9 @@ void trx_free(trx_t*& trx) ...@@ -493,11 +491,9 @@ void trx_free(trx_t*& trx)
/* Unpoison the memory for innodb_monitor_set_option; /* Unpoison the memory for innodb_monitor_set_option;
it is operating also on the freed transaction objects. */ it is operating also on the freed transaction objects. */
MEM_UNDEFINED(&trx->mutex, sizeof trx->mutex); MEM_UNDEFINED(&trx->mutex, sizeof trx->mutex);
MEM_UNDEFINED(&trx->undo_mutex, sizeof trx->undo_mutex);
/* Declare the contents as initialized for Valgrind; /* Declare the contents as initialized for Valgrind;
we checked that it was initialized in trx_pools->mem_free(trx). */ we checked that it was initialized in trx_pools->mem_free(trx). */
UNIV_MEM_VALID(&trx->mutex, sizeof trx->mutex); UNIV_MEM_VALID(&trx->mutex, sizeof trx->mutex);
UNIV_MEM_VALID(&trx->undo_mutex, sizeof trx->undo_mutex);
trx = NULL; trx = NULL;
} }
...@@ -1114,9 +1110,6 @@ trx_write_serialisation_history( ...@@ -1114,9 +1110,6 @@ trx_write_serialisation_history(
undo log to the purge queue. */ undo log to the purge queue. */
trx_serialise(trx); trx_serialise(trx);
/* It is not necessary to acquire trx->undo_mutex here because
only a single OS thread is allowed to commit this transaction.
The undo logs will be processed and purged later. */
if (UNIV_LIKELY_NULL(old_insert)) { if (UNIV_LIKELY_NULL(old_insert)) {
UT_LIST_REMOVE(rseg->old_insert_list, old_insert); UT_LIST_REMOVE(rseg->old_insert_list, old_insert);
trx_purge_add_undo_to_history(trx, old_insert, mtr); trx_purge_add_undo_to_history(trx, old_insert, mtr);
...@@ -1968,10 +1961,6 @@ trx_prepare_low(trx_t* trx) ...@@ -1968,10 +1961,6 @@ trx_prepare_low(trx_t* trx)
mtr_t mtr; mtr_t mtr;
/* It is not necessary to acquire trx->undo_mutex here because
only the owning (connection) thread of the transaction is
allowed to perform XA PREPARE. */
if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) { if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) {
ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg); ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);
......
...@@ -81,12 +81,12 @@ The contention of the trx_sys.mutex should be minimized. When a transaction ...@@ -81,12 +81,12 @@ The contention of the trx_sys.mutex should be minimized. When a transaction
does its first insert or modify in an index, an undo log is assigned for it. does its first insert or modify in an index, an undo log is assigned for it.
Then we must have an x-latch to the rollback segment header. Then we must have an x-latch to the rollback segment header.
When the transaction performs modifications or rolls back, its When the transaction performs modifications or rolls back, its
undo log is protected by undo page latches and trx_t::undo_mutex. undo log is protected by undo page latches.
Only the thread that is associated with the transaction may hold multiple Only the thread that is associated with the transaction may hold multiple
undo page latches at a time. Undo pages are always private to a single undo page latches at a time. Undo pages are always private to a single
transaction. Other threads that are performing MVCC reads transaction. Other threads that are performing MVCC reads
or checking for implicit locks will lock at most one undo page at a time or checking for implicit locks will lock at most one undo page at a time
in trx_undo_get_undo_rec_low(), without holding any undo_mutex. in trx_undo_get_undo_rec_low().
When the transaction commits, its persistent undo log is added When the transaction commits, its persistent undo log is added
to the history list. If it is not suitable for reuse, its slot is reset. to the history list. If it is not suitable for reuse, its slot is reset.
In both cases, an x-latch must be acquired on the rollback segment header page. In both cases, an x-latch must be acquired on the rollback segment header page.
...@@ -769,8 +769,6 @@ trx_undo_parse_page_header( ...@@ -769,8 +769,6 @@ trx_undo_parse_page_header(
buf_block_t* buf_block_t*
trx_undo_add_page(trx_t* trx, trx_undo_t* undo, mtr_t* mtr) trx_undo_add_page(trx_t* trx, trx_undo_t* undo, mtr_t* mtr)
{ {
ut_ad(mutex_own(&trx->undo_mutex));
trx_rseg_t* rseg = undo->rseg; trx_rseg_t* rseg = undo->rseg;
buf_block_t* new_block = NULL; buf_block_t* new_block = NULL;
ulint n_reserved; ulint n_reserved;
...@@ -1397,7 +1395,6 @@ A new undo log is created or a cached undo log reused. ...@@ -1397,7 +1395,6 @@ A new undo log is created or a cached undo log reused.
buf_block_t* buf_block_t*
trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr) trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
{ {
ut_ad(mutex_own(&trx->undo_mutex));
ut_ad(mtr->get_log_mode() == MTR_LOG_ALL); ut_ad(mtr->get_log_mode() == MTR_LOG_ALL);
trx_undo_t* undo = trx->rsegs.m_redo.undo; trx_undo_t* undo = trx->rsegs.m_redo.undo;
...@@ -1450,7 +1447,6 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo, ...@@ -1450,7 +1447,6 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
{ {
const bool is_temp = rseg == trx->rsegs.m_noredo.rseg; const bool is_temp = rseg == trx->rsegs.m_noredo.rseg;
ut_ad(mutex_own(&trx->undo_mutex));
ut_ad(rseg == trx->rsegs.m_redo.rseg ut_ad(rseg == trx->rsegs.m_redo.rseg
|| rseg == trx->rsegs.m_noredo.rseg); || rseg == trx->rsegs.m_noredo.rseg);
ut_ad(undo == (is_temp ut_ad(undo == (is_temp
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment