Commit 72e2d1d2 authored by Marko Mäkelä, committed by Aleksey Midenkov

MDEV-25004 Refactorings

  * Avoid some pessimization
  * Slightly smaller upgrade dataset
  * Simplify vers_row_same_trx() and its caller
parent e056efdd
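
The third bullet is the main interface change in row0ins.cc: vers_row_same_trx() used to report "history row written by this transaction" through a bool out-parameter while returning a separate error code; after this commit that outcome is folded into the return value as DB_FOREIGN_DUPLICATE_KEY. A minimal sketch of the before/after contract, using stand-in type names rather than the real InnoDB definitions:

/* Stand-in declarations for illustration only; the real dict_index_t, rec_t,
   que_thr_t, trx_t and dberr_t come from InnoDB headers. */
struct dict_index_t;
struct rec_t;
struct que_thr_t;
struct trx_t;
enum dberr_t { DB_SUCCESS = 0, DB_FOREIGN_DUPLICATE_KEY };

/* Before: success/error in the return value, the actual answer in *same_trx. */
dberr_t vers_row_same_trx_old(dict_index_t *index, const rec_t *rec,
                              que_thr_t *thr, bool *same_trx);

/* After: a single return value; DB_FOREIGN_DUPLICATE_KEY means the history
   row was written by trx, DB_SUCCESS means it was not. */
dberr_t vers_row_same_trx_new(dict_index_t *index, const rec_t *rec,
                              const trx_t &trx);
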
......@@ -12,7 +12,7 @@
# Examples
#
# export OLD_BINDIR="/home/midenok/src/mariadb/10.3b/build"
# mtr innodb_fts.versioning,orig_stopword,prepare
# ./mtr innodb_fts.versioning,prepare
#
if ($MTR_COMBINATION_PREPARE)
......@@ -56,11 +56,16 @@ if ($MTR_COMBINATION_PREPARE)
--exec mkdir -p $std_dir
--exec cp -af $datadir/ibdata1 $datadir/test/*.frm $std_dir
# zero out the doublewrite buffer
--exec dd if=/dev/zero of=$std_dir/ibdata1 bs=16k seek=64 count=128 conv=notrunc
--exec gzip -9f $std_dir/ibdata1 $std_dir/*.frm
}
if ($MTR_COMBINATION_UPGRADE)
{
--disable_query_log
call mtr.add_suppression("InnoDB: Table `mysql`.\`innodb_(table|index)_stats`");
--enable_query_log
--source include/shutdown_mysqld.inc
--exec rm -f $datadir/test/*.ibd $datadir/ib*
--exec cp -af $std_dir/ibdata1.gz $datadir
......
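
For orientation on the dd step above: with bs=16k, seek=64 and count=128 it overwrites 128 pages of ibdata1 starting at page 64, the region the script's own comment identifies as the doublewrite buffer. A small, self-contained sketch of the byte range that command touches (numbers taken straight from the dd arguments, not from InnoDB headers):

#include <cstdio>

int main()
{
  const unsigned long page_size  = 16 * 1024; /* bs=16k    */
  const unsigned long first_page = 64;        /* seek=64   */
  const unsigned long page_count = 128;       /* count=128 */

  /* dd zeroes page_count pages starting at byte offset first_page * page_size */
  std::printf("zeroing %lu bytes at offset %lu (pages %lu-%lu)\n",
              page_count * page_size, first_page * page_size,
              first_page, first_page + page_count - 1);
  return 0;
}
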
......@@ -2059,39 +2059,48 @@ row_ins_dupl_error_with_rec(
return(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
}
/** @return true if history row was inserted by this transaction
(row TRX_ID is the same as current TRX_ID). */
static
dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec,
que_thr_t* thr, bool *same_trx)
/** Determine whether a history row was inserted by this transaction
(row TRX_ID is the same as current TRX_ID).
@param index secondary index
@param rec secondary index record
@param trx transaction
@return error code
@retval DB_SUCCESS on success
@retval DB_FOREIGN_DUPLICATE_KEY if a history row was inserted by trx */
static dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec,
const trx_t& trx)
{
mtr_t mtr;
dberr_t ret= DB_SUCCESS;
ulint trx_id_len;
const byte *trx_id_bytes;
trx_id_t trx_id;
dict_index_t *clust_index= dict_table_get_first_index(index->table);
ut_ad(index != clust_index);
mtr.start();
rec_t *clust_rec=
row_get_clust_rec(BTR_SEARCH_LEAF, rec, index, &clust_index, &mtr);
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs *clust_offs= offsets_;
rec_offs_init(offsets_);
mem_heap_t *heap= NULL;
if (clust_rec)
if (const rec_t *clust_rec=
row_get_clust_rec(BTR_SEARCH_LEAF, rec, index, &clust_index, &mtr))
{
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs *clust_offs= offsets_;
rec_offs_init(offsets_);
mem_heap_t *heap= NULL;
clust_offs=
rec_get_offsets(clust_rec, clust_index, clust_offs,
clust_index->n_core_fields, ULINT_UNDEFINED, &heap);
if (!clust_index->vers_history_row(clust_rec, clust_offs))
rec_get_offsets(clust_rec, clust_index, clust_offs,
clust_index->n_core_fields, ULINT_UNDEFINED, &heap);
if (clust_index->vers_history_row(clust_rec, clust_offs))
{
*same_trx= false;
goto end;
ulint trx_id_len;
const byte *trx_id= rec_get_nth_field(clust_rec, clust_offs,
clust_index->n_uniq, &trx_id_len);
ut_ad(trx_id_len == DATA_TRX_ID_LEN);
if (trx.id == trx_read_trx_id(trx_id))
ret= DB_FOREIGN_DUPLICATE_KEY;
}
if (UNIV_LIKELY_NULL(heap))
mem_heap_free(heap);
}
else
{
......@@ -2099,21 +2108,8 @@ dberr_t vers_row_same_trx(dict_index_t* index, const rec_t* rec,
" of table " << index->table->name << " is out of sync";
ut_ad("secondary index is out of sync" == 0);
ret= DB_TABLE_CORRUPT;
goto end;
}
trx_id_bytes= rec_get_nth_field(clust_rec, clust_offs,
clust_index->n_uniq, &trx_id_len);
ut_ad(trx_id_len == DATA_TRX_ID_LEN);
trx_id= trx_read_trx_id(trx_id_bytes);
if (UNIV_LIKELY_NULL(heap))
mem_heap_free(heap);
*same_trx= thr_get_trx(thr)->id == trx_id;
end:
mtr.commit();
return ret;
}
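
In the rewritten body the history check reads the clustered record's DATA_TRX_ID field in place (trx_read_trx_id) and compares it against trx.id instead of exporting the value to the caller. As a point of reference, DATA_TRX_ID is a 6-byte big-endian column; a minimal stand-alone sketch of that decoding (an illustration, not the actual trx_read_trx_id implementation):

#include <cstdint>
#include <cstddef>

/* Decode a 6-byte, big-endian transaction id, the format of the DATA_TRX_ID
   system column whose length the function above asserts (DATA_TRX_ID_LEN). */
std::uint64_t read_trx_id(const unsigned char *field, std::size_t len = 6)
{
  std::uint64_t id = 0;
  for (std::size_t i = 0; i < len; i++)
    id = (id << 8) | field[i];  /* most significant byte first */
  return id;
}
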
......@@ -2141,9 +2137,6 @@ row_ins_scan_sec_index_for_duplicate(
ulint n_fields_cmp;
btr_pcur_t pcur;
dberr_t err = DB_SUCCESS;
dberr_t err2;
bool same_trx;
ulint allow_duplicates;
rec_offs offsets_[REC_OFFS_SEC_INDEX_SIZE];
rec_offs* offsets = offsets_;
DBUG_ENTER("row_ins_scan_sec_index_for_duplicate");
......@@ -2181,7 +2174,7 @@ row_ins_scan_sec_index_for_duplicate(
: BTR_SEARCH_LEAF,
&pcur, mtr);
allow_duplicates = thr_get_trx(thr)->duplicates;
trx_t* const trx = thr_get_trx(thr);
/* Scan index records and check if there is a duplicate */
......@@ -2202,7 +2195,7 @@ row_ins_scan_sec_index_for_duplicate(
if (flags & BTR_NO_LOCKING_FLAG) {
/* Set no locks when applying log
in online table rebuild. */
} else if (allow_duplicates) {
} else if (trx->duplicates) {
/* If the SQL-query will update or replace
duplicate key we will take X-lock for
......@@ -2239,20 +2232,14 @@ row_ins_scan_sec_index_for_duplicate(
err = DB_DUPLICATE_KEY;
thr_get_trx(thr)->error_info = index;
if (index->table->versioned()) {
err2 = vers_row_same_trx(index, rec,
thr, &same_trx);
if (err2 != DB_SUCCESS) {
err = err2;
goto end_scan;
}
trx->error_info = index;
if (same_trx) {
err = DB_FOREIGN_DUPLICATE_KEY;
goto end_scan;
}
if (!index->table->versioned()) {
} else if (dberr_t e =
vers_row_same_trx(index, rec,
*trx)) {
err = e;
goto end_scan;
}
/* If the duplicate is on hidden FTS_DOC_ID,
......
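
The caller-side change above leans on dberr_t being usable as a truth value: `else if (dberr_t e = vers_row_same_trx(index, rec, *trx))` only enters the branch when the callee returns something other than DB_SUCCESS, which therefore has to compare equal to zero. A compilable toy model of that control flow, with simplified enumerators standing in for the real dberr_t values:

enum dberr_t { DB_SUCCESS = 0, DB_DUPLICATE_KEY, DB_FOREIGN_DUPLICATE_KEY };

/* Stand-in for vers_row_same_trx(): nonzero only when the history row
   was written by the current transaction (or on a real error). */
dberr_t same_trx_check(bool same_trx)
{
  return same_trx ? DB_FOREIGN_DUPLICATE_KEY : DB_SUCCESS;
}

dberr_t on_duplicate(bool versioned, bool same_trx)
{
  dberr_t err = DB_DUPLICATE_KEY;
  if (!versioned) {
    /* plain duplicate: keep DB_DUPLICATE_KEY */
  } else if (dberr_t e = same_trx_check(same_trx)) {
    err = e;  /* overrides only when the check returned nonzero */
  }
  return err;
}
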
......@@ -455,9 +455,10 @@ row_merge_buf_redundant_convert(
@param[in] new_table new table
@param[in,out] psort_info parallel sort info
@param[in,out] row table row
@param[in] history_row row is historical in a system-versioned table
@param[in] ext cache of externally stored
column prefixes, or NULL
@param[in] history_fts row is historical in a system-versioned table
on which a FTS_DOC_ID_INDEX(FTS_DOC_ID) exists
@param[in,out] doc_id Doc ID if we are creating
FTS index
@param[in,out] conv_heap memory heap where to allocate data when
......@@ -478,8 +479,8 @@ row_merge_buf_add(
const dict_table_t* new_table,
fts_psort_t* psort_info,
dtuple_t* row,
const bool history_row,
const row_ext_t* ext,
const bool history_fts,
doc_id_t* doc_id,
mem_heap_t* conv_heap,
dberr_t* err,
......@@ -542,7 +543,7 @@ row_merge_buf_add(
: NULL;
/* Process the Doc ID column */
if (!v_col && index->table->fts && (*doc_id || history_row)
if (!v_col && (history_fts || *doc_id)
&& col->ind == index->table->fts->doc_col) {
fts_write_doc_id((byte*) &write_doc_id, *doc_id);
......@@ -594,7 +595,7 @@ row_merge_buf_add(
/* Tokenize and process data for FTS */
if (!history_row && (index->type & DICT_FTS)) {
if (!history_fts && (index->type & DICT_FTS)) {
fts_doc_item_t* doc_item;
byte* value;
void* ptr;
......@@ -1707,7 +1708,6 @@ row_merge_read_clustered_index(
char new_sys_trx_end[8];
byte any_autoinc_data[8] = {0};
bool vers_update_trt = false;
bool history_row = false;
DBUG_ENTER("row_merge_read_clustered_index");
......@@ -1897,6 +1897,7 @@ row_merge_read_clustered_index(
dtuple_t* row;
row_ext_t* ext;
page_cur_t* cur = btr_pcur_get_page_cur(&pcur);
bool history_row, history_fts = false;
page_cur_move_to_next(cur);
......@@ -2135,11 +2136,10 @@ row_merge_read_clustered_index(
row_heap);
ut_ad(row);
if (new_table->versioned()) {
const dfield_t* dfield = dtuple_get_nth_field(
row, new_table->vers_end);
history_row = dfield->vers_history_row();
}
history_row = new_table->versioned()
&& dtuple_get_nth_field(row, new_table->vers_end)
->vers_history_row();
history_fts = history_row && new_table->fts;
for (ulint i = 0; i < n_nonnull; i++) {
dfield_t* field = &row->fields[nonnull[i]];
......@@ -2170,7 +2170,7 @@ row_merge_read_clustered_index(
}
/* Get the next Doc ID */
if (add_doc_id && !history_row) {
if (add_doc_id && !history_fts) {
doc_id++;
} else {
doc_id = 0;
......@@ -2329,7 +2329,7 @@ row_merge_read_clustered_index(
if (UNIV_LIKELY
(row && (rows_added = row_merge_buf_add(
buf, fts_index, old_table, new_table,
psort_info, row, history_row, ext,
psort_info, row, ext, history_fts,
&doc_id, conv_heap, &err,
&v_heap, eval_table, trx)))) {
......@@ -2662,8 +2662,8 @@ row_merge_read_clustered_index(
if (UNIV_UNLIKELY
(!(rows_added = row_merge_buf_add(
buf, fts_index, old_table,
new_table, psort_info, row,
history_row, ext, &doc_id,
new_table, psort_info,
row, ext, history_fts, &doc_id,
conv_heap, &err, &v_heap,
eval_table, trx)))) {
/* An empty buffer should have enough
......
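
The row0merge.cc changes replace the broad history_row flag with history_fts, which is true only for a history row in a table that actually has full-text indexes, so versioned tables without FTS no longer take the Doc ID suppression path. A small sketch of how the flag is derived and consumed, mirroring the diff above with stand-in names rather than the real row structures:

#include <cstdint>
using doc_id_t = std::uint64_t;

struct table_t      /* stand-in for dict_table_t */
{
  bool versioned;   /* system-versioned table?           */
  bool has_fts;     /* does it carry a FTS_DOC_ID index? */
};

/* Mirrors the new logic in row_merge_read_clustered_index():
   history_fts = history_row && table has FTS. */
bool compute_history_fts(const table_t &t, bool row_end_is_history)
{
  const bool history_row = t.versioned && row_end_is_history;
  return history_row && t.has_fts;
}

/* Mirrors the Doc ID handling: only non-history rows of an FTS table
   get a fresh Doc ID; otherwise the Doc ID is reset to 0. */
doc_id_t next_doc_id(doc_id_t doc_id, bool add_doc_id, bool history_fts)
{
  return (add_doc_id && !history_fts) ? doc_id + 1 : 0;
}
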