Commit 5defdc38 authored by Marko Mäkelä

Cleanup: Remove mtr_state_t and mtr_t::m_state

mtr_t::is_active(), mtr_t::has_committed(): Make debug-only.
parent c69a8629
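
For readers unfamiliar with the ut_d()/ut_ad() debug macros, the standalone sketch below (illustrative names only, with plain assert() standing in for ut_ad() and #ifndef NDEBUG standing in for UNIV_DEBUG; not the actual InnoDB code) shows the pattern this commit adopts: the three-valued mtr_state_t enum is replaced by two debug-only booleans, so release builds carry no state field at all.

```cpp
#include <cassert>

struct mini_transaction
{
  void start()
  {
#ifndef NDEBUG
    m_start = true;    // ut_d(m_start= true) in the real code
    m_commit = false;  // ut_d(m_commit= false)
#endif
    // ... reset log buffer, memo stack, log mode, etc.
  }

  void commit()
  {
    assert(is_active());
    // ... write redo log records, release latches ...
#ifndef NDEBUG
    m_commit = true;   // ut_d(m_commit= true) in release_resources()
#endif
  }

#ifndef NDEBUG
  // Debug-only accessors: release-build callers must track the state
  // themselves (see the bool mtr_started flag added to row0merge.cc below).
  bool is_active() const { assert(!m_commit || m_start); return m_start && !m_commit; }
  bool has_committed() const { assert(!m_commit || m_start); return m_commit; }

private:
  bool m_start = false;   // whether start() has been called
  bool m_commit = false;  // whether commit() has been called
#endif
};

int main()
{
  mini_transaction mtr;
  mtr.start();
  assert(mtr.is_active());
  mtr.commit();
  assert(mtr.has_committed() && !mtr.is_active());
}
```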
......@@ -125,8 +125,6 @@ struct mtr_memo_slot_t {
/** Mini-transaction handle and buffer */
struct mtr_t {
mtr_t() : m_state(MTR_STATE_INIT) {}
/** Start a mini-transaction. */
void start();
......@@ -138,12 +136,11 @@ struct mtr_t {
MLOG_FILE_NAME records and an optional MLOG_CHECKPOINT marker.
The caller must invoke log_mutex_enter() and log_mutex_exit().
This is to be used at log_checkpoint().
@param[in] checkpoint_lsn log checkpoint LSN, or 0 */
void commit_files(lsn_t checkpoint_lsn = 0);
@param checkpoint_lsn the log sequence number of a checkpoint, or 0 */
void commit_files(lsn_t checkpoint_lsn= 0);
/** Return current size of the buffer.
@return savepoint */
ulint get_savepoint() const {ut_ad(is_active()); return m_memo.size();}
/** @return mini-transaction savepoint (current size of m_memo) */
ulint get_savepoint() const { ut_ad(is_active()); return m_memo.size(); }
/** Release the (index tree) s-latch stored in an mtr memo after a
savepoint.
......@@ -331,10 +328,6 @@ struct mtr_t {
/** @return true if we are inside the change buffer code */
bool is_inside_ibuf() const { return m_inside_ibuf; }
/** @return true if the mini-transaction is active */
bool is_active() const { return m_state == MTR_STATE_ACTIVE; }
/** Get flush observer
@return flush observer */
FlushObserver* get_flush_observer() const { return m_flush_observer; }
......@@ -383,9 +376,6 @@ struct mtr_t {
/** Print info of an mtr handle. */
void print() const;
/** @return true if the mini-transaction has committed */
bool has_committed() const { return m_state == MTR_STATE_COMMITTED; }
/** @return true if mini-transaction contains modifications. */
bool has_modifications() const { return m_modifications; }
......@@ -490,13 +480,26 @@ struct mtr_t {
inline ulint prepare_write();
/** Append the redo log records to the redo log buffer.
@param[in] len number of bytes to write
@param len number of bytes to write
@return start_lsn */
inline lsn_t finish_write(ulint len);
/** Release the resources */
inline void release_resources();
#ifdef UNIV_DEBUG
public:
/** @return whether the mini-transaction is active */
bool is_active() const { ut_ad(!m_commit || m_start); return m_start && !m_commit; }
/** @return whether the mini-transaction has been committed */
bool has_committed() const { ut_ad(!m_commit || m_start); return m_commit; }
private:
/** whether start() has been called */
bool m_start= false;
/** whether commit() has been called */
bool m_commit= false;
#endif
/** memo stack for locks etc. */
mtr_buf_t m_memo;
......@@ -527,9 +530,6 @@ struct mtr_t {
/** User tablespace that is being modified by the mini-transaction */
fil_space_t* m_user_space;
/** State of the transaction */
mtr_state_t m_state;
/** Flush Observer */
FlushObserver* m_flush_observer;
......
......@@ -272,10 +272,4 @@ enum mtr_memo_type_t {
};
#endif /* !UNIV_CHECKSUM */
enum mtr_state_t {
MTR_STATE_INIT = 0,
MTR_STATE_ACTIVE,
MTR_STATE_COMMITTED
};
#endif /* mtr0types_h */
......@@ -380,6 +380,9 @@ void mtr_t::start()
{
UNIV_MEM_INVALID(this, sizeof *this);
ut_d(m_start= true);
ut_d(m_commit= false);
new(&m_memo) mtr_buf_t();
new(&m_log) mtr_buf_t();
......@@ -389,24 +392,23 @@ void mtr_t::start()
m_n_log_recs= 0;
m_log_mode= MTR_LOG_ALL;
ut_d(m_user_space_id= TRX_SYS_SPACE);
m_user_space= NULL;
m_state= MTR_STATE_ACTIVE;
m_flush_observer= NULL;
m_user_space= nullptr;
m_flush_observer= nullptr;
m_commit_lsn= 0;
}
/** Release the resources */
inline void mtr_t::release_resources()
{
ut_ad(is_active());
ut_d(m_memo.for_each_block_in_reverse(CIterate<DebugCheck>()));
m_log.erase();
m_memo.erase();
m_state= MTR_STATE_COMMITTED;
ut_d(m_commit= true);
}
/** Commit a mini-transaction. */
void
mtr_t::commit()
void mtr_t::commit()
{
ut_ad(is_active());
ut_ad(!is_inside_ibuf());
......@@ -415,8 +417,7 @@ mtr_t::commit()
ut_ad(!m_modifications || !recv_no_log_write);
ut_ad(!m_modifications || m_log_mode != MTR_LOG_NONE);
if (m_modifications
&& (m_n_log_recs || m_log_mode == MTR_LOG_NO_REDO))
if (m_modifications && (m_n_log_recs || m_log_mode == MTR_LOG_NO_REDO))
{
ut_ad(!srv_read_only_mode || m_log_mode == MTR_LOG_NO_REDO);
......
......@@ -64,14 +64,10 @@ class index_tuple_info_t {
/** constructor
@param[in] heap memory heap
@param[in] index index to be created */
index_tuple_info_t(
mem_heap_t* heap,
dict_index_t* index) UNIV_NOTHROW
{
m_heap = heap;
m_index = index;
m_dtuple_vec = UT_NEW_NOKEY(idx_tuple_vec());
}
index_tuple_info_t(mem_heap_t* heap, dict_index_t* index) :
m_dtuple_vec(UT_NEW_NOKEY(idx_tuple_vec())),
m_index(index), m_heap(heap)
{ ut_ad(index->is_spatial()); }
/** destructor */
~index_tuple_info_t()
......@@ -107,13 +103,11 @@ class index_tuple_info_t {
@param[in] trx_id transaction id
@param[in,out] row_heap memory heap
@param[in] pcur cluster index scanning cursor
@param[in,out] mtr_started whether scan_mtr is active
@param[in,out] scan_mtr mini-transaction for pcur
@return DB_SUCCESS if successful, else error number */
inline dberr_t insert(
trx_id_t trx_id,
mem_heap_t* row_heap,
btr_pcur_t* pcur,
mtr_t* scan_mtr)
dberr_t insert(trx_id_t trx_id, mem_heap_t* row_heap, btr_pcur_t* pcur,
bool& mtr_started, mtr_t* scan_mtr) const
{
big_rec_t* big_rec;
rec_t* rec;
......@@ -128,7 +122,7 @@ class index_tuple_info_t {
| BTR_NO_LOCKING_FLAG
| BTR_KEEP_SYS_FLAG | BTR_CREATE_FLAG;
ut_ad(dict_index_is_spatial(m_index));
ut_ad(mtr_started == scan_mtr->is_active());
DBUG_EXECUTE_IF("row_merge_instrument_log_check_flush",
log_sys.check_flush_or_checkpoint = true;
......@@ -141,10 +135,11 @@ class index_tuple_info_t {
ut_ad(dtuple);
if (log_sys.check_flush_or_checkpoint) {
if (scan_mtr->is_active()) {
if (mtr_started) {
btr_pcur_move_to_prev_on_page(pcur);
btr_pcur_store_position(pcur, scan_mtr);
scan_mtr->commit();
mtr_started = false;
}
log_free_check();
......@@ -247,13 +242,13 @@ class index_tuple_info_t {
idx_tuple_vec;
/** vector used to cache index rows made from cluster index scan */
idx_tuple_vec* m_dtuple_vec;
idx_tuple_vec* const m_dtuple_vec;
/** the index being built */
dict_index_t* m_index;
dict_index_t* const m_index;
/** memory heap for creating index tuples */
mem_heap_t* m_heap;
mem_heap_t* const m_heap;
};
/* Maximum pending doc memory limit in bytes for a fts tokenization thread */
......@@ -1574,10 +1569,11 @@ row_mtuple_cmp(
@param[in] trx_id transaction id
@param[in] sp_tuples cached spatial rows
@param[in] num_spatial number of spatial indexes
@param[in,out] row_heap heap for insert
@param[in,out] heap heap for insert
@param[in,out] sp_heap heap for tuples
@param[in,out] pcur cluster index cursor
@param[in,out] mtr mini transaction
@param[in,out] started whether mtr is active
@param[in,out] mtr mini-transaction
@return DB_SUCCESS or error number */
static
dberr_t
......@@ -1585,30 +1581,21 @@ row_merge_spatial_rows(
trx_id_t trx_id,
index_tuple_info_t** sp_tuples,
ulint num_spatial,
mem_heap_t* row_heap,
mem_heap_t* heap,
mem_heap_t* sp_heap,
btr_pcur_t* pcur,
bool& started,
mtr_t* mtr)
{
dberr_t err = DB_SUCCESS;
if (!sp_tuples)
return DB_SUCCESS;
if (sp_tuples == NULL) {
return(DB_SUCCESS);
}
ut_ad(sp_heap != NULL);
for (ulint j = 0; j < num_spatial; j++) {
err = sp_tuples[j]->insert(trx_id, row_heap, pcur, mtr);
if (err != DB_SUCCESS) {
return(err);
}
}
for (ulint j= 0; j < num_spatial; j++)
if (dberr_t err= sp_tuples[j]->insert(trx_id, heap, pcur, started, mtr))
return err;
mem_heap_empty(sp_heap);
return(err);
return DB_SUCCESS;
}
/** Check if the geometry field is valid.
......@@ -1711,6 +1698,7 @@ row_merge_read_clustered_index(
btr_pcur_t pcur; /* Cursor on the clustered
index */
mtr_t mtr; /* Mini transaction */
bool mtr_started = false;
dberr_t err = DB_SUCCESS;/* Return code */
ulint n_nonnull = 0; /* number of columns
changed to NOT NULL */
......@@ -1833,7 +1821,8 @@ row_merge_read_clustered_index(
ut_ad(count == num_spatial);
}
mtr_start(&mtr);
mtr.start();
mtr_started = true;
/* Find the clustered index and create a persistent cursor
based on that. */
......@@ -1852,6 +1841,7 @@ row_merge_read_clustered_index(
btr_pcur_open_at_index_side(
true, clust_index, BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
mtr_started = true;
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
if (rec_is_metadata(btr_pcur_get_rec(&pcur), *clust_index)) {
ut_ad(btr_pcur_is_on_user_rec(&pcur));
......@@ -1958,13 +1948,13 @@ row_merge_read_clustered_index(
/* Insert the cached spatial index rows. */
err = row_merge_spatial_rows(
trx->id, sp_tuples, num_spatial,
row_heap, sp_heap, &pcur, &mtr);
row_heap, sp_heap, &pcur, mtr_started, &mtr);
if (err != DB_SUCCESS) {
goto func_exit;
}
if (!mtr.is_active()) {
if (!mtr_started) {
goto scan_next;
}
......@@ -1988,12 +1978,16 @@ row_merge_read_clustered_index(
== clust_index->page);
btr_pcur_store_position(&pcur, &mtr);
mtr_commit(&mtr);
mtr.commit();
mtr_started = false;
/* Give the waiters a chance to proceed. */
os_thread_yield();
scan_next:
mtr_start(&mtr);
ut_ad(!mtr_started);
ut_ad(!mtr.is_active());
mtr.start();
mtr_started = true;
/* Restore position on the record, or its
predecessor if the record was purged
meanwhile. */
......@@ -2005,7 +1999,8 @@ row_merge_read_clustered_index(
&pcur, &mtr)) {
end_of_index:
row = NULL;
mtr_commit(&mtr);
mtr.commit();
mtr_started = false;
mem_heap_free(row_heap);
row_heap = NULL;
ut_free(nonnull);
......@@ -2471,7 +2466,8 @@ row_merge_read_clustered_index(
trx->id, sp_tuples,
num_spatial,
row_heap, sp_heap,
&pcur, &mtr);
&pcur, mtr_started,
&mtr);
if (err != DB_SUCCESS) {
goto func_exit;
......@@ -2479,20 +2475,21 @@ row_merge_read_clustered_index(
/* We are not at the end of
the scan yet. We must
mtr_commit() in order to be
mtr.commit() in order to be
able to call log_free_check()
in row_merge_insert_index_tuples().
Due to mtr_commit(), the
Due to mtr.commit(), the
current row will be invalid, and
we must reread it on the next
loop iteration. */
if (mtr.is_active()) {
if (mtr_started) {
btr_pcur_move_to_prev_on_page(
&pcur);
btr_pcur_store_position(
&pcur, &mtr);
mtr.commit();
mtr_started = false;
}
}
......@@ -2548,7 +2545,8 @@ row_merge_read_clustered_index(
next record (the one which we
had to ignore due to the buffer
overflow). */
mtr_start(&mtr);
mtr.start();
mtr_started = true;
btr_pcur_restore_position(
BTR_SEARCH_LEAF, &pcur,
&mtr);
......@@ -2734,8 +2732,9 @@ row_merge_read_clustered_index(
}
func_exit:
if (mtr.is_active()) {
mtr_commit(&mtr);
ut_ad(mtr_started == mtr.is_active());
if (mtr_started) {
mtr.commit();
}
if (row_heap) {
mem_heap_free(row_heap);
......
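
As a usage note, the row0merge.cc part of the diff replaces release-build calls to mtr.is_active() with a caller-maintained flag. The standalone sketch below (hypothetical stub type and function names, assert() in place of ut_ad(); not code from this commit, and the stub exposes is_active() unconditionally for brevity) illustrates that bookkeeping.

```cpp
#include <cassert>

struct stub_mtr                         // stand-in for mtr_t, state only
{
  void start()  { m_active = true; m_committed = false; }
  void commit() { assert(m_active); m_active = false; m_committed = true; }
  bool is_active() const { return m_active; }
private:
  bool m_active = false, m_committed = false;
};

// Mirrors what row_merge_read_clustered_index() now does: keep a local
// bool mtr_started in all builds and only cross-check it against
// is_active() in debug assertions.
void scan_sketch(bool interrupt_scan)
{
  stub_mtr mtr;
  bool mtr_started = false;

  mtr.start();
  mtr_started = true;                   // mirror the state explicitly

  if (interrupt_scan) {
    // Commit before a potentially blocking step (log_free_check() in the
    // real code) and record that the mini-transaction is no longer active.
    mtr.commit();
    mtr_started = false;

    assert(!mtr_started && !mtr.is_active());
    mtr.start();                        // restart the scan afterwards
    mtr_started = true;
  }

  // Debug builds can still cross-check the manual flag, as func_exit does.
  assert(mtr_started == mtr.is_active());

  if (mtr_started) {                    // replaces the old mtr.is_active() test
    mtr.commit();
    mtr_started = false;
  }
}

int main()
{
  scan_sketch(false);
  scan_sketch(true);
}
```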