Commit d7d80689 authored by Sergei Petrunia

MDEV-15101: Stop ANALYZE TABLE from flushing table definition cache

Apply this patch from Percona Server (amended for 10.5):

commit cd7201514fee78aaf7d3eb2b28d2573c76f53b84
Author: Laurynas Biveinis <laurynas.biveinis@gmail.com>
Date:   Tue Nov 14 06:34:19 2017 +0200

    Fix bug 1704195 / 87065 / TDB-83 (Stop ANALYZE TABLE from flushing table definition cache)

    Make ANALYZE TABLE stop flushing affected tables from the table
    definition cache, which has the effect of not blocking any subsequent
    new queries involving the table if there's a parallel long-running
    query:

    - new table flag HA_ONLINE_ANALYZE, return it for InnoDB and TokuDB
      tables;
    - in mysql_admin_table, if we are performing ANALYZE TABLE, and the
      table flag is set, do not remove the table from the table
      definition cache, do not invalidate query cache;
    - in partitioning handler, refresh the query optimizer statistics
      after ANALYZE if the underlying handler supports HA_ONLINE_ANALYZE;
    - new testcases main.percona_nonflushing_analyze_debug,
      parts.percona_nonflushing_analyze_debug and a supporting debug sync
      point.

    For TokuDB, this change exposes bug TDB-83 (Index cardinality stats
    updated for handler::info(HA_STATUS_CONST), not often enough for
    tokudb_cardinality_scale_percent). TokuDB may return different
    rec_per_key values depending on dynamic variable
    tokudb_cardinality_scale_percent value. The server does not have a way
    of knowing that changing this variable invalidates the previous
    rec_per_key values in any opened table shares, and so does not call
    info(HA_STATUS_CONST) again. Fix by updating rec_per_key for both
    HA_STATUS_CONST and HA_STATUS_VARIABLE. This also forces a re-record
    of tokudb.bugs.db756_card_part_hash_1_pick, with the new output
    seeming to be more correct.
parent d34cc6b3
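
The TokuDB part of the change described above (updating rec_per_key for both HA_STATUS_CONST and HA_STATUS_VARIABLE) is not among the hunks shown below; a minimal sketch of that pattern, assuming a hypothetical ha_example handler and a hypothetical scaled_records_per_key() helper, could look like this:

int ha_example::info(uint flag)
{
  /*
    Sketch only: refresh index cardinality estimates for HA_STATUS_VARIABLE
    as well as HA_STATUS_CONST, so a changed tokudb_cardinality_scale_percent
    is picked up by already-open table shares without another
    info(HA_STATUS_CONST) call. scaled_records_per_key() is a hypothetical
    helper.
  */
  if (flag & (HA_STATUS_CONST | HA_STATUS_VARIABLE))
  {
    for (uint i= 0; i < table->s->keys; i++)
    {
      KEY *key= &table->key_info[i];
      for (uint j= 0; j < key->user_defined_key_parts; j++)
        key->rec_per_key[j]= scaled_records_per_key(i, j);
    }
  }
  return 0;
}

Because tokudb_cardinality_scale_percent is a dynamic variable, refreshing the estimates on the far more frequent HA_STATUS_VARIABLE calls is what lets a new value take effect without evicting or reopening the table share.
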
#
# Test ANALYZE TABLE that does not flush table definition cache
# Arguments:
# $percona_nonflushing_analyze_table - table to test
#
--source include/count_sessions.inc
--connect con1,localhost,root
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
send_eval SELECT * FROM $percona_nonflushing_analyze_table;
--connection default
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
eval ANALYZE TABLE $percona_nonflushing_analyze_table;
# With the bug fixed this should not block
eval SELECT * FROM $percona_nonflushing_analyze_table;
SET DEBUG_SYNC="now SIGNAL finish_scan";
--connection con1
reap;
--disconnect con1
--connection default
SET DEBUG_SYNC='reset';
--source include/wait_until_count_sessions.inc
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
connect con1,localhost,root;
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT * FROM t1;
a
1
2
3
SET DEBUG_SYNC="now SIGNAL finish_scan";
connection con1;
a
1
2
3
disconnect con1;
connection default;
SET DEBUG_SYNC='reset';
DROP TABLE t1;
--source include/have_debug_sync.inc
--source include/have_innodb.inc
#set use_stat_tables='preferably_for_queries';
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1), (2), (3);
--let $percona_nonflushing_analyze_table= t1
--source include/percona_nonflushing_analyze_debug.inc
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t1 VALUES (1), (2), (3), (4);
connect con1,localhost,root;
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT * FROM t1;
a
1
2
3
4
SET DEBUG_SYNC="now SIGNAL finish_scan";
connection con1;
a
1
2
3
4
disconnect con1;
connection default;
SET DEBUG_SYNC='reset';
DROP TABLE t1;
CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a)
SUBPARTITION BY HASH (A)
SUBPARTITIONS 2 (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t2 VALUES (1), (2), (3), (4);
connect con1,localhost,root;
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t2;
connection default;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT * FROM t2;
a
1
2
3
4
SET DEBUG_SYNC="now SIGNAL finish_scan";
connection con1;
a
1
2
3
4
disconnect con1;
connection default;
SET DEBUG_SYNC='reset';
DROP TABLE t2;
--source include/have_debug_sync.inc
--source include/have_innodb.inc
--source include/have_partition.inc
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t1 VALUES (1), (2), (3), (4);
--let $percona_nonflushing_analyze_table= t1
--source include/percona_nonflushing_analyze_debug.inc
DROP TABLE t1;
CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=InnoDB
PARTITION BY RANGE (a)
SUBPARTITION BY HASH (A)
SUBPARTITIONS 2 (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (10));
INSERT INTO t2 VALUES (1), (2), (3), (4);
--let $percona_nonflushing_analyze_table= t2
--source include/percona_nonflushing_analyze_debug.inc
DROP TABLE t2;
@@ -1192,7 +1192,17 @@ int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt)
 {
   DBUG_ENTER("ha_partition::analyze");
-  DBUG_RETURN(handle_opt_partitions(thd, check_opt, ANALYZE_PARTS));
+  int result= handle_opt_partitions(thd, check_opt, ANALYZE_PARTS);
+  if ((result == 0) && m_file[0]
+      && (m_file[0]->ha_table_flags() & HA_ONLINE_ANALYZE))
+  {
+    /* If this is ANALYZE TABLE that will not force table definition cache
+       eviction, update statistics for the partition handler. */
+    this->info(HA_STATUS_CONST | HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+  }
+  DBUG_RETURN(result);
 }
@@ -3103,6 +3103,9 @@ int handler::ha_index_next(uchar * buf)
     table->update_virtual_fields(this, VCOL_UPDATE_FOR_READ);
   }
   table->status=result ? STATUS_NOT_FOUND: 0;
+  DEBUG_SYNC(ha_thd(), "handler_ha_index_next_end");
   DBUG_RETURN(result);
 }
@@ -342,7 +342,15 @@ enum chf_create_flags {
 /* Support native hash index */
 #define HA_CAN_HASH_KEYS (1ULL << 57)
 #define HA_CRASH_SAFE (1ULL << 58)
-#define HA_LAST_TABLE_FLAG HA_CRASH_SAFE
+/*
+  There is no need to evict the table from the table definition cache having
+  run ANALYZE TABLE on it
+*/
+#define HA_ONLINE_ANALYZE (1ULL << 59)
+#define HA_LAST_TABLE_FLAG HA_ONLINE_ANALYZE
 /* bits in index_flags(index_number) for what you can do with index */
 #define HA_READ_NEXT 1 /* TODO really use this flag */
@@ -1141,6 +1141,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
     }
     if (table->table && !table->view)
     {
+      /*
+        Don't skip flushing if we are collecting EITS statistics.
+      */
+      const bool skip_flush=
+        (operator_func == &handler::ha_analyze) &&
+        (table->table->file->ha_table_flags() & HA_ONLINE_ANALYZE) &&
+        !collect_eis;
       if (table->table->s->tmp_table)
       {
         /*
@@ -1150,7 +1157,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
         if (open_for_modify && !open_error)
           table->table->file->info(HA_STATUS_CONST);
       }
-      else if (open_for_modify || fatal_error)
+      else if ((!skip_flush && open_for_modify) || fatal_error)
       {
         table->table->s->tdc->flush_unused(true);
         /*
@@ -2669,6 +2669,7 @@ ha_innobase::ha_innobase(
           | HA_CAN_FULLTEXT_HINTS
          */
          | HA_CAN_EXPORT
+         | HA_ONLINE_ANALYZE
          | HA_CAN_RTREEKEYS
          | HA_CAN_TABLES_WITHOUT_ROLLBACK
          | HA_CAN_ONLINE_BACKUPS