Commit 57737258 authored by Marko Mäkelä

Merge 10.7 into 10.8

parents b3c25433 dbcbee10
@@ -1001,12 +1001,30 @@ void ha_end_backup()
PLUGIN_IS_DELETED|PLUGIN_IS_READY, 0);
}
void handler::log_not_redoable_operation(const char *operation)
/*
Take a lock to block MDL_BACKUP_DDL (used by mariadb-backup) while
the DDL operation is taking place
*/
bool handler::log_not_redoable_operation(const char *operation)
{
DBUG_ENTER("log_not_redoable_operation");
if (table->s->tmp_table == NO_TMP_TABLE)
{
/*
Take a lock to ensure that mariadb-backup will notice the
new log entry (and re-copy the table if needed).
*/
THD *thd= table->in_use;
MDL_request mdl_backup;
backup_log_info ddl_log;
MDL_REQUEST_INIT(&mdl_backup, MDL_key::BACKUP, "", "", MDL_BACKUP_DDL,
MDL_STATEMENT);
if (thd->mdl_context.acquire_lock(&mdl_backup,
thd->variables.lock_wait_timeout))
DBUG_RETURN(1);
bzero(&ddl_log, sizeof(ddl_log));
lex_string_set(&ddl_log.query, operation);
/*
@@ -1022,7 +1040,7 @@ void handler::log_not_redoable_operation(const char *operation)
ddl_log.org_table_id= table->s->tabledef_version;
backup_log_ddl(&ddl_log);
}
DBUG_VOID_RETURN;
DBUG_RETURN(0);
}
/*
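The handler.cc hunk above changes log_not_redoable_operation() to return bool: the backup MDL lock is now acquired with lock_wait_timeout, and a timeout has to be reported to the caller (ha_maria::end_bulk_insert() reacts to it further down). A minimal standalone sketch of that shape, in plain C++ with std::timed_mutex standing in for the MDL BACKUP lock; the names here are illustrative, not MariaDB APIs:

```cpp
#include <chrono>
#include <cstdio>
#include <mutex>

static std::timed_mutex backup_lock;    /* stand-in for the MDL BACKUP namespace */

/* Returns true on failure (lock wait timeout), mirroring the new bool return. */
static bool log_not_redoable_operation_model(const char *operation,
                                             std::chrono::milliseconds timeout)
{
  if (!backup_lock.try_lock_for(timeout))
    return true;                         /* caller must undo and report an error */
  std::printf("backup log: non-redoable operation %s\n", operation);
  backup_lock.unlock();
  return false;
}

int main()
{
  if (log_not_redoable_operation_model("BULK_INSERT",
                                       std::chrono::milliseconds(100)))
    std::fprintf(stderr, "lock wait timeout\n");
  return 0;
}
```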
@@ -5123,7 +5123,7 @@ class handler :public Sql_alloc
return (lower_case_table_names == 2 && !(ha_table_flags() & HA_FILE_BASED));
}
void log_not_redoable_operation(const char *operation);
bool log_not_redoable_operation(const char *operation);
protected:
Handler_share *get_ha_share_ptr();
void set_ha_share_ptr(Handler_share *arg_ha_share);
@@ -766,11 +766,12 @@ TRANSACTIONAL_TARGET static void trx_purge_truncate_history()
auto block= reinterpret_cast<buf_block_t*>(bpage);
if (!bpage->lock.x_lock_try())
{
rescan:
/* Let buf_pool_t::release_freed_page() proceed. */
mysql_mutex_unlock(&buf_pool.flush_list_mutex);
std::this_thread::yield();
mysql_mutex_lock(&buf_pool.mutex);
mysql_mutex_lock(&buf_pool.flush_list_mutex);
rescan:
mysql_mutex_unlock(&buf_pool.mutex);
bpage= UT_LIST_GET_LAST(buf_pool.flush_list);
continue;
}
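The trx_purge_truncate_history() hunk moves the rescan: label so that the retry path first reacquires buf_pool.mutex and buf_pool.flush_list_mutex in their fixed order, then drops buf_pool.mutex again before restarting the scan from the tail of the flush list. A rough standalone model of that retry shape (plain C++; the mutexes, Page and flush_list below are stand-ins, not the real buf_pool types):

```cpp
#include <mutex>
#include <thread>
#include <vector>

struct Page { std::mutex latch; };       /* stand-in for bpage->lock */

static std::mutex pool_mutex;            /* stand-in for buf_pool.mutex */
static std::mutex flush_list_mutex;      /* stand-in for buf_pool.flush_list_mutex */
static std::vector<Page> flush_list(4);  /* stand-in for buf_pool.flush_list */

static void scan_flush_list()
{
  flush_list_mutex.lock();
  for (size_t i= flush_list.size(); i-- > 0; )
  {
    if (!flush_list[i].latch.try_lock())
    {
      /* Let other threads release the page: drop the list mutex, yield,
         then reacquire both mutexes in their canonical order
         (pool mutex before flush-list mutex). */
      flush_list_mutex.unlock();
      std::this_thread::yield();
      pool_mutex.lock();
      flush_list_mutex.lock();
      /* "rescan": the pool mutex is no longer needed; release it and
         restart the scan from the tail of the list. */
      pool_mutex.unlock();
      i= flush_list.size();
      continue;
    }
    flush_list[i].latch.unlock();        /* real code would process the page */
  }
  flush_list_mutex.unlock();
}

int main() { scan_flush_list(); return 0; }
```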
@@ -872,7 +872,7 @@ extern "C" {
int _ma_killed_ptr(HA_CHECK *param)
{
if (likely(thd_killed((THD*)param->thd)) == 0)
if (!param->thd || likely(thd_killed((THD*)param->thd)) == 0)
return 0;
my_errno= HA_ERR_ABORTED_BY_USER;
return 1;
@@ -901,6 +901,7 @@ int _ma_killed_ptr(HA_CHECK *param)
void _ma_report_progress(HA_CHECK *param, ulonglong progress,
ulonglong max_progress)
{
if (param->thd)
thd_progress_report((THD*)param->thd,
progress + max_progress * param->stage,
max_progress * param->max_stage);
@@ -2263,7 +2264,6 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR;
write_log_record_for_bulk_insert(file);
_ma_tmp_disable_logging_for_table(file, TRUE);
/*
Pages currently in the page cache have type PAGECACHE_LSN_PAGE, we
are not allowed to overwrite them with PAGECACHE_PLAIN_PAGE, so
@@ -2271,8 +2271,12 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
forced an UNDO which will for sure empty the table if we crash. The
upcoming unique-key insertions however need a proper index, so we
cannot leave the corrupted on-disk index file, thus we truncate it.
The following call will log the truncate and update the lsn for the table
to ensure that all redos before this point will be ignored.
*/
maria_delete_all_rows(file);
_ma_tmp_disable_logging_for_table(file, TRUE);
}
}
else if (!file->bulk_insert &&
@@ -2303,23 +2307,58 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
int ha_maria::end_bulk_insert()
{
int first_error, error;
my_bool abort= file->s->deleting;
int first_error, first_errno= 0, error;
my_bool abort= file->s->deleting, empty_table= 0;
uint enable_index_mode= HA_KEY_SWITCH_NONUNIQ_SAVE;
DBUG_ENTER("ha_maria::end_bulk_insert");
if ((first_error= maria_end_bulk_insert(file, abort)))
{
first_errno= my_errno;
abort= 1;
}
if ((error= maria_extra(file, HA_EXTRA_NO_CACHE, 0)))
{
first_error= first_error ? first_error : error;
if (!first_error)
{
first_error= error;
first_errno= my_errno;
}
abort= 1;
}
if (!abort && can_enable_indexes)
if ((error= enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE)))
first_error= first_error ? first_error : error;
if (bulk_insert_single_undo != BULK_INSERT_NONE)
{
if (log_not_redoable_operation("BULK_INSERT"))
{
/* Got a lock timeout. Revert to an empty table and return an error. */
if (!first_error)
{
first_error= 1;
first_errno= my_errno;
}
enable_index_mode= HA_KEY_SWITCH_ALL;
empty_table= 1;
/*
Ignore all changed pages, as required by _ma_reenable_logging_for_table()
*/
_ma_flush_table_files(file, MARIA_FLUSH_DATA|MARIA_FLUSH_INDEX,
FLUSH_IGNORE_CHANGED, FLUSH_IGNORE_CHANGED);
}
}
if (!abort && can_enable_indexes)
{
if ((error= enable_indexes(enable_index_mode)))
{
if (!first_error)
{
first_error= 1;
first_errno= my_errno;
}
}
}
if (bulk_insert_single_undo != BULK_INSERT_NONE)
{
/*
@@ -2328,12 +2367,23 @@ int ha_maria::end_bulk_insert()
*/
if ((error= _ma_reenable_logging_for_table(file,
bulk_insert_single_undo ==
BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)))
first_error= first_error ? first_error : error;
bulk_insert_single_undo= BULK_INSERT_NONE; // Safety
log_not_redoable_operation("BULK_INSERT");
BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)) &&
!empty_table)
{
if (!first_error)
{
first_error= 1;
first_errno= my_errno;
}
}
bulk_insert_single_undo= BULK_INSERT_NONE; // Safety if called again
}
if (empty_table)
maria_delete_all_rows(file);
can_enable_indexes= 0;
if (first_error)
my_errno= first_errno;
DBUG_RETURN(first_error);
}
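The rewritten end_bulk_insert() keeps executing its remaining cleanup steps after a failure, but remembers only the first error together with its errno and restores that errno just before returning. A small self-contained sketch of this aggregation pattern (the step_* functions are fakes, not Aria calls):

```cpp
#include <cerrno>

static int step_flush()          { errno= EIO;   return 1; }  /* pretend failure */
static int step_enable_indexes() { errno= EINTR; return 1; }  /* later failure */

static int end_bulk_insert_model()
{
  int first_error= 0, first_errno= 0, error;

  if ((error= step_flush()) && !first_error)
  {
    first_error= error;
    first_errno= errno;
  }
  /* Later steps still run so that cleanup is complete, but they must not
     overwrite the error that will be reported to the user. */
  if ((error= step_enable_indexes()) && !first_error)
  {
    first_error= error;
    first_errno= errno;
  }
  if (first_error)
    errno= first_errno;                  /* report the first error, not the last */
  return first_error;
}

int main() { return end_bulk_insert_model() ? 1 : 0; }
```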
@@ -64,6 +64,7 @@ int maria_delete_all_rows(MARIA_HA *info)
*/
LEX_CUSTRING log_array[TRANSLOG_INTERNAL_PARTS + 1];
uchar log_data[FILEID_STORE_SIZE];
my_bool error;
log_array[TRANSLOG_INTERNAL_PARTS + 0].str= log_data;
log_array[TRANSLOG_INTERNAL_PARTS + 0].length= sizeof(log_data);
if (unlikely(translog_write_record(&lsn, LOGREC_REDO_DELETE_ALL,
@@ -78,6 +79,32 @@ int maria_delete_all_rows(MARIA_HA *info)
*/
if (_ma_mark_file_changed(share))
goto err;
/*
Because LOGREC_REDO_DELETE_ALL does not operate on pages, it has the
following problem:
delete_all; inserts (redo_insert); all pages get flushed; checkpoint:
the dirty pages list will be empty. In recovery, delete_all is executed,
but redo_insert are skipped (dirty pages list is empty).
To avoid this, we need to set skip_redo_lsn now, and thus need to sync
files.
Also fixes the problem of:
bulk insert; insert; delete_all; crash:
"bulk insert" is skipped (no REDOs), so if "insert" would not be skipped
(if we didn't update skip_redo_lsn below) then "insert" would be tried
and fail, saying that it sees that the first page has to be created
though the inserted row has rownr>0.
We use lsn-1 below to ensure that the above redo will be executed.
*/
error= _ma_state_info_write(share,
MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
MA_STATE_INFO_WRITE_LOCK) ||
_ma_update_state_lsns(share, lsn-1, info->trn->trid, FALSE, FALSE) ||
_ma_sync_table_files(info);
info->trn->rec_lsn= LSN_IMPOSSIBLE;
if (error)
goto err;
}
else
{
@@ -113,28 +140,9 @@ int maria_delete_all_rows(MARIA_HA *info)
if (log_record)
{
/*
Because LOGREC_REDO_DELETE_ALL does not operate on pages, it has the
following problem:
delete_all; inserts (redo_insert); all pages get flushed; checkpoint:
the dirty pages list will be empty. In recovery, delete_all is executed,
but redo_insert are skipped (dirty pages list is empty).
To avoid this, we need to set skip_redo_lsn now, and thus need to sync
files.
Also fixes the problem of:
bulk insert; insert; delete_all; crash:
"bulk insert" is skipped (no REDOs), so if "insert" would not be skipped
(if we didn't update skip_redo_lsn below) then "insert" would be tried
and fail, saying that it sees that the first page has to be created
though the inserted row has rownr>0.
*/
my_bool error= _ma_state_info_write(share,
MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET |
MA_STATE_INFO_WRITE_LOCK) ||
_ma_update_state_lsns(share, lsn, info->trn->trid, FALSE, FALSE) ||
_ma_sync_table_files(info);
info->trn->rec_lsn= LSN_IMPOSSIBLE;
if (error)
/* Update lsn to signal that the above redo does not have to be executed anymore */
if ( _ma_update_state_lsns(share, lsn, info->trn->trid, FALSE, FALSE) ||
_ma_sync_table_files(info))
goto err;
}
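The comment blocks above (and the rec->lsn <= is_of_horizon check in the recovery hunk below) boil down to one rule: a redo record is replayed only when its LSN is newer than the LSN stored in the table state. Writing lsn-1 before the wipe keeps the LOGREC_REDO_DELETE_ALL record replayable after a crash; writing lsn once the wipe is synced marks it as already applied. A toy model of that comparison (the struct and names below are illustrative, not the real MARIA_SHARE state):

```cpp
#include <cstdint>
#include <cstdio>

using lsn_t= std::uint64_t;

struct table_state_model { lsn_t skip_redo_lsn= 0; };  /* not the real state layout */

/* A redo record is applied only when it is newer than the stored LSN. */
static bool should_apply_redo(const table_state_model &s, lsn_t rec_lsn)
{
  return rec_lsn > s.skip_redo_lsn;
}

int main()
{
  table_state_model state;
  const lsn_t delete_all_lsn= 100;

  /* Before the wipe: store lsn-1 so the DELETE_ALL record itself is still
     replayed if we crash before the wipe is complete. */
  state.skip_redo_lsn= delete_all_lsn - 1;
  std::printf("replay DELETE_ALL? %d\n",
              should_apply_redo(state, delete_all_lsn));

  /* After the wipe is done and synced: store the record's own LSN so that
     recovery does not replay it a second time. */
  state.skip_redo_lsn= delete_all_lsn;
  std::printf("replay DELETE_ALL again? %d\n",
              should_apply_redo(state, delete_all_lsn));
  return 0;
}
```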
@@ -1175,16 +1175,6 @@ prototype_redo_exec_hook(REDO_REPAIR_TABLE)
/* We try to get the table first, so that we get the table in the trace log */
info= get_MARIA_HA_from_REDO_record(rec);
if (skip_DDLs)
{
/*
REPAIR is not exactly a DDL, but it manipulates files without logging
insertions into them.
*/
tprint(tracef, "we skip DDLs\n");
DBUG_RETURN(0);
}
if (!info)
{
/* no such table, don't need to warn */
@@ -1196,6 +1186,13 @@ prototype_redo_exec_hook(REDO_REPAIR_TABLE)
tprint(tracef, "we skip repairing crashed table\n");
DBUG_RETURN(0);
}
if (rec->lsn <= info->s->state.is_of_horizon)
{
DBUG_PRINT("info", ("Table is up to date, skipping redo"));
DBUG_RETURN(0);
}
/*
Otherwise, the mapping is newer than the table, and our record is newer
than the mapping, so we can repair.
@@ -1560,7 +1557,6 @@ prototype_redo_exec_hook(REDO_INSERT_ROW_HEAD)
uchar *buff= NULL;
MARIA_HA *info= get_MARIA_HA_from_REDO_record(rec);
if (info == NULL || maria_is_crashed(info))
{
/*
Table was skipped at open time (because later dropped/renamed, not