Commit 366bb162 in nexedi / MariaDB, authored Nov 21, 2017 by Marko Mäkelä

Merge 10.2 into bb-10.2-ext

parents 563f1d89 375caf99

Showing 24 changed files with 2942 additions and 98 deletions (+2942, -98)

mysql-test/suite/innodb/include/innodb_bulk_create_index.inc  +185  -0
mysql-test/suite/innodb/include/innodb_bulk_create_index_debug.inc  +221  -0
mysql-test/suite/innodb/r/innodb_bulk_create_index.result  +1037  -0
mysql-test/suite/innodb/r/innodb_bulk_create_index_debug.result  +485  -0
mysql-test/suite/innodb/r/innodb_bulk_create_index_flush.result  +54  -0
mysql-test/suite/innodb/r/innodb_bulk_create_index_replication.result  +222  -0
mysql-test/suite/innodb/r/innodb_bulk_create_index_small.result  +139  -0
mysql-test/suite/innodb/r/truncate_restart.result  +13  -0
mysql-test/suite/innodb/t/innodb_bulk_create_index.test  +46  -0
mysql-test/suite/innodb/t/innodb_bulk_create_index_debug.test  +23  -0
mysql-test/suite/innodb/t/innodb_bulk_create_index_flush.test  +75  -0
mysql-test/suite/innodb/t/innodb_bulk_create_index_replication.test  +182  -0
mysql-test/suite/innodb/t/innodb_bulk_create_index_small.test  +148  -0
mysql-test/suite/innodb/t/truncate_restart.test  +18  -0
storage/innobase/buf/buf0flu.cc  +3  -5
storage/innobase/buf/buf0lru.cc  +51  -71
storage/innobase/dict/dict0stats.cc  +4  -1
storage/innobase/fil/fil0fil.cc  +4  -1
storage/innobase/include/buf0flu.h  +8  -3
storage/innobase/include/buf0lru.h  +5  -3
storage/innobase/row/row0import.cc  +10  -5
storage/innobase/row/row0merge.cc  +1  -1
storage/innobase/row/row0quiesce.cc  +4  -1
storage/innobase/srv/srv0start.cc  +4  -7

mysql-test/suite/innodb/include/innodb_bulk_create_index.inc  0 → 100644

#
# wl#7277: InnoDB: Bulk Load for Create Index
#

# Create Insert Procedure
DELIMITER |;

CREATE PROCEDURE populate_t1(load_even INT)
BEGIN
	DECLARE i int DEFAULT 1;

	START TRANSACTION;
	WHILE (i <= 10000) DO
		IF i%2 = 0 AND load_even = 1 THEN
			INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
		END IF;
		IF i%2 != 0 AND load_even != 1 THEN
			INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
		END IF;
		SET i = i + 1;
	END WHILE;
	COMMIT;
END|

DELIMITER ;|

SELECT @@innodb_fill_factor;

if ($row_format != 'COMPRESSED') {
	eval CREATE TABLE t1(
		class	INT,
		id	INT,
		title	VARCHAR(100)
	) ENGINE=InnoDB ROW_FORMAT=$row_format;
}

if ($row_format == 'COMPRESSED') {
	SET GLOBAL innodb_file_per_table=1;

	eval CREATE TABLE t1(
		class	INT,
		id	INT,
		title	VARCHAR(100)
	) ENGINE=InnoDB ROW_FORMAT=$row_format KEY_BLOCK_SIZE=4;
}

--disable_query_log
# Load half records
CALL populate_t1(1);
--enable_query_log

SELECT COUNT(*) FROM t1;

/* Create index. */
CREATE INDEX idx_id ON t1(id);

CREATE INDEX idx_title ON t1(title);

/* Check table. */
CHECK TABLE t1;

/* Select by index. */
EXPLAIN SELECT * FROM t1 WHERE id = 10;
EXPLAIN SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 5000;
SELECT * FROM t1 WHERE title = 'a5000';

SELECT * FROM t1 WHERE id = 10000;
SELECT * FROM t1 WHERE title = 'a10000';

SELECT * FROM t1 WHERE id = 10010;
SELECT * FROM t1 WHERE title = 'a10010';

/*Insert/Update/Delete. */
DELETE FROM t1 WHERE id < 4010 AND id > 3990;
INSERT INTO t1 VALUES(4000, 4000, 'b4000');
UPDATE t1 SET title = CONCAT('b', id) WHERE id < 3010 AND id > 2990;

SELECT * FROM t1 WHERE id = 3000;
SELECT * FROM t1 WHERE title = 'a3000';
SELECT * FROM t1 WHERE title = 'b3000';

SELECT * FROM t1 WHERE id = 4000;
SELECT * FROM t1 WHERE title = 'a4000';
SELECT * FROM t1 WHERE title = 'b4000';

SELECT * FROM t1 WHERE id = 4001;
SELECT * FROM t1 WHERE title = 'a4001';

--disable_query_log
# Load half records (follow up load)
CALL populate_t1(0);
--enable_query_log

SELECT COUNT(*) FROM t1;

/* Add column. */
ALTER TABLE t1 ADD COLUMN content TEXT;

CHECK TABLE t1;

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 5000;
SELECT * FROM t1 WHERE title = 'a5000';

SELECT * FROM t1 WHERE id = 10000;
SELECT * FROM t1 WHERE title = 'a10000';

SELECT * FROM t1 WHERE id = 10010;
SELECT * FROM t1 WHERE title = 'a10010';

/* Drop column. */
ALTER TABLE t1 DROP COLUMN content;

CHECK TABLE t1;

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 5000;
SELECT * FROM t1 WHERE title = 'a5000';

SELECT * FROM t1 WHERE id = 10000;
SELECT * FROM t1 WHERE title = 'a10000';

SELECT * FROM t1 WHERE id = 10010;
SELECT * FROM t1 WHERE title = 'a10010';

DROP TABLE t1;

# Test Blob
if ($row_format != 'COMPRESSED') {
	eval CREATE TABLE t1(
		a INT PRIMARY KEY,
		b TEXT,
		c TEXT) ENGINE=InnoDB ROW_FORMAT=$row_format;
}

if ($row_format == 'COMPRESSED') {
	eval CREATE TABLE t1(
		a INT PRIMARY KEY,
		b BLOB,
		c TEXT) ENGINE=InnoDB ROW_FORMAT=$row_format KEY_BLOCK_SIZE=4;
}

let $cnt = 5000;
--disable_query_log
WHILE ($cnt >= 4950)
{
	EVAL INSERT INTO t1 VALUES
		($cnt, REPEAT(CONCAT('a', $cnt), 2000), CONCAT('a', $cnt));
	dec $cnt;
}
--enable_query_log

ALTER TABLE t1 ADD INDEX `idx` (a, b(5));

SELECT CHAR_LENGTH(b) FROM t1 WHERE a = 4975;

SELECT b = REPEAT(CONCAT('a', 4975), 2000) FROM t1
	WHERE a = 4975 AND b like 'a4975%';

UPDATE t1 SET b = REPEAT(CONCAT('b', 4975), 2000)
	WHERE a = 4975 AND b like 'a4975%';

SELECT b = REPEAT(CONCAT('a', 4975), 2000) FROM t1
	WHERE a = 4975 AND b like 'a4975%';
SELECT b = REPEAT(CONCAT('b', 4975), 2000) FROM t1
	WHERE a = 4975 AND b like 'b4975%';

DELETE FROM t1 WHERE a = 4975 AND b like 'b4975%';

SELECT b = REPEAT(CONCAT('b', 4975), 2000) FROM t1
	WHERE a = 4975 AND b like 'b4975%';

ALTER TABLE t1 DROP COLUMN c;

CHECK TABLE t1;

SELECT CHAR_LENGTH(b) FROM t1 WHERE a = 4975;

DROP TABLE t1;

# Restore global variables
if ($row_format == 'COMPRESSED') {
	SET GLOBAL innodb_file_per_table=default;
}

DROP PROCEDURE populate_t1;

mysql-test/suite/innodb/include/innodb_bulk_create_index_debug.inc  0 → 100644

#
# wl#7277: InnoDB: Bulk Load for Create Index
#

# Not supported in embedded
--source include/not_embedded.inc

# This test case needs to crash the server. Needs a debug server.
--source include/have_debug.inc

# Don't test this under valgrind, memory leaks will occur.
--source include/not_valgrind.inc

# Avoid CrashReporter popup on Mac
--source include/not_crashrep.inc

--source include/have_innodb.inc

# Create Insert Procedure
DELIMITER |;

CREATE PROCEDURE populate_t1()
BEGIN
	DECLARE i int DEFAULT 1;

	START TRANSACTION;
	WHILE (i <= 10000) DO
		INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
		SET i = i + 1;
	END WHILE;
	COMMIT;
END|

DELIMITER ;|

# Test scenarios:
# 1. Test restart;
# 2. Test crash recovery.

# Test Restart
if ($row_format != 'COMPRESSED') {
	eval CREATE TABLE t1(
		class	INT,
		id	INT,
		title	VARCHAR(100)
	) ENGINE=InnoDB ROW_FORMAT=$row_format;
}

if ($row_format == 'COMPRESSED') {
	SET GLOBAL innodb_file_per_table=1;

	eval CREATE TABLE t1(
		class	INT,
		id	INT,
		title	VARCHAR(100)
	) ENGINE=InnoDB ROW_FORMAT=$row_format KEY_BLOCK_SIZE=4;
}

--disable_query_log
CALL populate_t1();
--enable_query_log

SELECT COUNT(*) FROM t1;

CREATE INDEX idx_title ON t1(title);

--source include/restart_mysqld.inc

CHECK TABLE t1;

SELECT * FROM t1 WHERE title = 'a10';
SELECT * FROM t1 WHERE title = 'a5000';
SELECT * FROM t1 WHERE title = 'a10000';
SELECT * FROM t1 WHERE title = 'a10010';

DROP TABLE t1;

--echo # Test Blob

if ($row_format != 'COMPRESSED') {
	eval CREATE TABLE t1(
		a INT PRIMARY KEY,
		b TEXT,
		c TEXT) ENGINE=InnoDB ROW_FORMAT=$row_format;
}

if ($row_format == 'COMPRESSED') {
	SET GLOBAL innodb_file_per_table=1;

	eval CREATE TABLE t1(
		a INT PRIMARY KEY,
		b TEXT,
		c TEXT) ENGINE=InnoDB ROW_FORMAT=$row_format KEY_BLOCK_SIZE=4;
}

INSERT INTO t1 VALUES
(1, REPEAT('a',10000), 'a'),
(2, REPEAT('b',20000), 'b'),
(3, REPEAT('c',40000), 'c'),
(4, REPEAT('d',60000), 'd');

SELECT CHAR_LENGTH(b) FROM t1;

ALTER TABLE t1 DROP COLUMN c;

--source include/restart_mysqld.inc

CHECK TABLE t1;

SELECT CHAR_LENGTH(b) FROM t1;

DROP TABLE t1;

# Test Crash Recovery
if ($row_format != 'COMPRESSED') {
	eval CREATE TABLE t1(
		class	INT,
		id	INT,
		title	VARCHAR(100)
	) ENGINE=InnoDB ROW_FORMAT=$row_format;
}

if ($row_format == 'COMPRESSED') {
	SET GLOBAL innodb_file_per_table=1;

	eval CREATE TABLE t1(
		class	INT,
		id	INT,
		title	VARCHAR(100)
	) ENGINE=InnoDB ROW_FORMAT=$row_format KEY_BLOCK_SIZE=4;
}

--disable_query_log
CALL populate_t1();
--enable_query_log

SET debug_dbug='+d,crash_commit_before';

# Write file to make mysql-test-run.pl start up the server again
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect

--error 2013
CREATE INDEX idx_title ON t1(title);

--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect

SELECT COUNT(*) FROM t1;

CHECK TABLE t1;

EXPLAIN SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE title = 'a10';
SELECT * FROM t1 WHERE title = 'a5000';
SELECT * FROM t1 WHERE title = 'a10000';
SELECT * FROM t1 WHERE title = 'a10010';

DROP TABLE t1;

--echo # Test Blob

if ($row_format != 'COMPRESSED') {
	eval CREATE TABLE t1(
		a INT PRIMARY KEY,
		b TEXT,
		c TEXT) ENGINE=InnoDB ROW_FORMAT=$row_format;
}

if ($row_format == 'COMPRESSED') {
	SET GLOBAL innodb_file_per_table=1;

	eval CREATE TABLE t1(
		a INT PRIMARY KEY,
		b TEXT,
		c TEXT) ENGINE=InnoDB ROW_FORMAT=$row_format KEY_BLOCK_SIZE=4;
}

INSERT INTO t1 VALUES
(1, REPEAT('a',10000), 'a'),
(2, REPEAT('b',20000), 'b'),
(3, REPEAT('c',40000), 'c'),
(4, REPEAT('d',60000), 'd');

SELECT CHAR_LENGTH(b) FROM t1;

SET debug_dbug='+d,crash_commit_before';

# Write file to make mysql-test-run.pl start up the server again
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect

--error 2013
ALTER TABLE t1 DROP COLUMN c;

--enable_reconnect
--source include/wait_until_connected_again.inc
--disable_reconnect

CHECK TABLE t1;

SELECT CHAR_LENGTH(b) FROM t1;

DROP TABLE t1;

# Restore global variables
if ($row_format == 'COMPRESSED') {
	SET GLOBAL innodb_file_per_table=default;
}

DROP PROCEDURE populate_t1;

mysql-test/suite/innodb/r/innodb_bulk_create_index.result  0 → 100644

(Diff collapsed: 1037 added lines not shown.)

mysql-test/suite/innodb/r/innodb_bulk_create_index_debug.result  0 → 100644

(Diff collapsed: 485 added lines not shown.)

mysql-test/suite/innodb/r/innodb_bulk_create_index_flush.result  0 → 100644

CREATE PROCEDURE populate_t1()
BEGIN
DECLARE i int DEFAULT 1;
START TRANSACTION;
WHILE (i <= 10000) DO
INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
SET i = i + 1;
END WHILE;
COMMIT;
END|
CREATE TABLE t1(
class INT,
id INT,
title VARCHAR(100)
) ENGINE=InnoDB;
SELECT COUNT(*) FROM t1;
COUNT(*)
10000
SET @saved_dbug= @@SESSION.debug_dbug;
SET debug_dbug='+d,ib_index_build_fail_before_flush';
CREATE INDEX idx_id ON t1(id);
ERROR 70100: Query execution was interrupted
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
CREATE INDEX idx_title ON t1(title);
ERROR 70100: Query execution was interrupted
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
CREATE FULLTEXT INDEX fidx_title ON t1(title);
ERROR 70100: Query execution was interrupted
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
ALTER TABLE t1 ADD COLUMN content TEXT;
ERROR 70100: Query execution was interrupted
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
SET debug_dbug= @saved_dbug;
INSERT INTO t1 VALUES(10001, 10001, 'a10000');
ALTER TABLE t1 ADD UNIQUE INDEX idx_title(title);
ERROR 23000: Duplicate entry 'a10000' for key 'idx_title'
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
ALTER TABLE t1 ADD UNIQUE INDEX idx_id(id), ADD UNIQUE INDEX idx_title(title);
ERROR 23000: Duplicate entry 'a10000' for key 'idx_title'
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
DROP PROCEDURE populate_t1;
mysql-test/suite/innodb/r/innodb_bulk_create_index_replication.result  0 → 100644

include/master-slave.inc
[connection master]
connection master;
CREATE PROCEDURE populate_t1(load_even INT)
BEGIN
DECLARE i int DEFAULT 1;
START TRANSACTION;
WHILE (i <= 100) DO
IF i%2 = 0 AND load_even = 1 THEN
INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
END IF;
IF i%2 != 0 AND load_even != 1 THEN
INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
END IF;
SET i = i + 1;
END WHILE;
COMMIT;
END|
CREATE TABLE t1(
class INT,
id INT,
title VARCHAR(100)
) ENGINE=InnoDB ;
SELECT COUNT(*) FROM t1;
COUNT(*)
50
/* Create index. */
CREATE INDEX idx_id ON t1(id);
CREATE INDEX idx_title ON t1(title);
/* Select by index. */
EXPLAIN SELECT * FROM t1 WHERE id = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_id idx_id 5 const 1
EXPLAIN SELECT * FROM t1 WHERE title = 'a10';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_title idx_title 103 const 1 Using index condition
SELECT * FROM t1 WHERE id = 10;
class id title
10 10 a10
SELECT * FROM t1 WHERE title = 'a10';
class id title
10 10 a10
SELECT * FROM t1 WHERE id = 20;
class id title
20 20 a20
SELECT * FROM t1 WHERE title = 'a20';
class id title
20 20 a20
SELECT * FROM t1 WHERE id = 30;
class id title
30 30 a30
SELECT * FROM t1 WHERE title = 'a30';
class id title
30 30 a30
SELECT * FROM t1 WHERE id = 101;
class id title
SELECT * FROM t1 WHERE title = 'a101';
class id title
/*Insert/Update/Delete. */
DELETE FROM t1 WHERE id < 40 AND id > 30;
INSERT INTO t1 VALUES(38, 38, 'b38');
UPDATE t1 SET title = CONCAT('b', id) WHERE id < 30 AND id > 20;
SELECT * FROM t1 WHERE id = 28;
class id title
28 28 b28
SELECT * FROM t1 WHERE title = 'a28';
class id title
SELECT * FROM t1 WHERE title = 'b28';
class id title
28 28 b28
SELECT * FROM t1 WHERE id = 38;
class id title
38 38 b38
SELECT * FROM t1 WHERE title = 'a38';
class id title
SELECT * FROM t1 WHERE title = 'b38';
class id title
38 38 b38
SELECT * FROM t1 WHERE id = 101;
class id title
SELECT * FROM t1 WHERE title = 'a101';
class id title
SELECT COUNT(*) FROM t1;
COUNT(*)
97
SELECT * FROM t1 WHERE id = 10;
class id title
10 10 a10
SELECT * FROM t1 WHERE title = 'a10';
class id title
10 10 a10
SELECT * FROM t1 WHERE id = 20;
class id title
20 20 a20
SELECT * FROM t1 WHERE title = 'a20';
class id title
20 20 a20
SELECT * FROM t1 WHERE id = 30;
class id title
30 30 a30
SELECT * FROM t1 WHERE title = 'a30';
class id title
30 30 a30
SELECT * FROM t1 WHERE id = 101;
class id title
SELECT * FROM t1 WHERE title = 'a101';
class id title
CREATE TABLE t_part (
class INT ,
id INT ,
title VARCHAR(30)
) ENGINE=InnoDB
PARTITION BY RANGE(id)
SUBPARTITION BY KEY(id)
SUBPARTITIONS 4
(
PARTITION p0 VALUES LESS THAN (5000),
PARTITION p1 VALUES LESS THAN (MAXVALUE)
);
INSERT INTO t_part SELECT * FROM t1;
ALTER TABLE t_part ADD INDEX `idx` (class,id,title(10));
SELECT * FROM t_part WHERE id = 10;
class id title
10 10 a10
SELECT * FROM t_part WHERE title = 'a10';
class id title
10 10 a10
SELECT * FROM t_part WHERE id = 20;
class id title
20 20 a20
SELECT * FROM t_part WHERE title = 'a20';
class id title
20 20 a20
SELECT * FROM t_part WHERE id = 30;
class id title
30 30 a30
SELECT * FROM t_part WHERE title = 'a30';
class id title
30 30 a30
SELECT * FROM t_part WHERE id = 101;
class id title
SELECT * FROM t_part WHERE title = 'a101';
class id title
include/sync_slave_sql_with_master.inc
connection slave;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`class` int(11) DEFAULT NULL,
`id` int(11) DEFAULT NULL,
`title` varchar(100) DEFAULT NULL,
KEY `idx_id` (`id`),
KEY `idx_title` (`title`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
SHOW CREATE TABLE t_part;
Table Create Table
t_part CREATE TABLE `t_part` (
`class` int(11) DEFAULT NULL,
`id` int(11) DEFAULT NULL,
`title` varchar(30) DEFAULT NULL,
KEY `idx` (`class`,`id`,`title`(10))
) ENGINE=InnoDB DEFAULT CHARSET=latin1
PARTITION BY RANGE (`id`)
SUBPARTITION BY KEY (`id`)
SUBPARTITIONS 4
(PARTITION `p0` VALUES LESS THAN (5000) ENGINE = InnoDB,
PARTITION `p1` VALUES LESS THAN MAXVALUE ENGINE = InnoDB)
SELECT COUNT(*) FROM t1;
COUNT(*)
97
SELECT COUNT(*) FROM t_part;
COUNT(*)
97
SELECT * FROM t1 WHERE id = 10;
class id title
10 10 a10
SELECT * FROM t1 WHERE title = 'a10';
class id title
10 10 a10
SELECT * FROM t1 WHERE id = 20;
class id title
20 20 a20
SELECT * FROM t1 WHERE title = 'a20';
class id title
20 20 a20
SELECT * FROM t1 WHERE id = 30;
class id title
30 30 a30
SELECT * FROM t1 WHERE title = 'a30';
class id title
30 30 a30
SELECT * FROM t1 WHERE id = 101;
class id title
SELECT * FROM t1 WHERE title = 'a101';
class id title
SELECT * FROM t_part WHERE id = 10;
class id title
10 10 a10
SELECT * FROM t_part WHERE title = 'a10';
class id title
10 10 a10
SELECT * FROM t_part WHERE id = 20;
class id title
20 20 a20
SELECT * FROM t_part WHERE title = 'a20';
class id title
20 20 a20
SELECT * FROM t_part WHERE id = 30;
class id title
30 30 a30
SELECT * FROM t_part WHERE title = 'a30';
class id title
30 30 a30
SELECT * FROM t_part WHERE id = 101;
class id title
SELECT * FROM t_part WHERE title = 'a101';
class id title
connection master;
DROP PROCEDURE populate_t1;
DROP TABLE t1;
DROP TABLE t_part;
include/rpl_end.inc
mysql-test/suite/innodb/r/innodb_bulk_create_index_small.result  0 → 100644

CREATE PROCEDURE populate_t1()
BEGIN
DECLARE i int DEFAULT 1;
START TRANSACTION;
WHILE (i <= 1000) DO
INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
SET i = i + 1;
END WHILE;
COMMIT;
END|
SELECT @@innodb_fill_factor;
@@innodb_fill_factor
100
CREATE TABLE t1(
class INT,
id INT,
title VARCHAR(100)
) ENGINE=InnoDB ROW_FORMAT=COMPACT;
SELECT COUNT(*) FROM t1;
COUNT(*)
1000
/* Create index. */
CREATE INDEX idx_id ON t1(id);
CREATE INDEX idx_title ON t1(title);
/* Check table. */
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
/* Select by index. */
EXPLAIN SELECT * FROM t1 WHERE id = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_id idx_id 5 const 1
EXPLAIN SELECT * FROM t1 WHERE title = 'a10';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_title idx_title 103 const 1 Using index condition
SELECT * FROM t1 WHERE id = 10;
class id title
10 10 a10
SELECT * FROM t1 WHERE title = 'a10';
class id title
10 10 a10
SELECT * FROM t1 WHERE id = 500;
class id title
500 500 a500
SELECT * FROM t1 WHERE title = 'a500';
class id title
500 500 a500
SELECT * FROM t1 WHERE id = 1000;
class id title
1000 1000 a1000
SELECT * FROM t1 WHERE title = 'a1000';
class id title
1000 1000 a1000
SELECT * FROM t1 WHERE id = 1010;
class id title
SELECT * FROM t1 WHERE title = 'a1010';
class id title
DROP TABLE t1;
CREATE TABLE t1(
a INT PRIMARY KEY,
b TEXT,
c TEXT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
INSERT INTO t1 VALUES
(1, REPEAT('a',10000), 'a'),
(2, REPEAT('b',20000), 'b'),
(3, REPEAT('c',40000), 'c'),
(4, REPEAT('d',60000), 'd');
ALTER TABLE t1 DROP COLUMN c;
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
CHAR_LENGTH(b)
DROP TABLE t1;
SET GLOBAL innodb_file_per_table=default;
SET GLOBAL innodb_file_per_table=1;
CREATE TABLE t1(
class INT,
id INT,
title VARCHAR(100)
) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SELECT COUNT(*) FROM t1;
COUNT(*)
1000
/* Create index. */
CREATE INDEX idx_id ON t1(id);
CREATE INDEX idx_title ON t1(title);
/* Check table. */
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
/* Select by index. */
EXPLAIN SELECT * FROM t1 WHERE id = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_id idx_id 5 const 1
EXPLAIN SELECT * FROM t1 WHERE title = 'a10';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref idx_title idx_title 103 const 1 Using index condition
SELECT * FROM t1 WHERE id = 10;
class id title
10 10 a10
SELECT * FROM t1 WHERE title = 'a10';
class id title
10 10 a10
SELECT * FROM t1 WHERE id = 500;
class id title
500 500 a500
SELECT * FROM t1 WHERE title = 'a500';
class id title
500 500 a500
SELECT * FROM t1 WHERE id = 1000;
class id title
1000 1000 a1000
SELECT * FROM t1 WHERE title = 'a1000';
class id title
1000 1000 a1000
SELECT * FROM t1 WHERE id = 1010;
class id title
SELECT * FROM t1 WHERE title = 'a1010';
class id title
DROP TABLE t1;
CREATE TABLE t1(
a INT PRIMARY KEY,
b TEXT,
c TEXT) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
INSERT INTO t1 VALUES
(1, REPEAT('a',10000), 'a'),
(2, REPEAT('b',20000), 'b'),
(3, REPEAT('c',40000), 'c'),
(4, REPEAT('d',60000), 'd');
ALTER TABLE t1 DROP COLUMN c;
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;
CHAR_LENGTH(b)
DROP TABLE t1;
SET GLOBAL innodb_file_per_table=default;
DROP PROCEDURE populate_t1;
mysql-test/suite/innodb/r/truncate_restart.result  0 → 100644

call mtr.add_suppression("InnoDB: Cannot save table statistics for table `test`\\.`t1`: Persistent statistics do not exist");
SET GLOBAL innodb_stats_persistent= ON;
CREATE TABLE t1 (t TEXT) ENGINE=InnoDB;
connect con1,localhost,root,,test;
SET DEBUG_SYNC='ib_trunc_table_trunc_completing SIGNAL committed WAIT_FOR ever';
TRUNCATE TABLE t1;
connection default;
SET DEBUG_SYNC='now WAIT_FOR committed';
disconnect con1;
SELECT COUNT(*) FROM t1;
COUNT(*)
0
DROP TABLE t1;
mysql-test/suite/innodb/t/innodb_bulk_create_index.test  0 → 100644

######## suite/innodb/t/innodb_bulk_create_index.test ################
#                                                                    #
# Testcase for worklog WL#7277: InnoDB: Bulk Load for Create Index   #
# The basic idea of bulk load is to build an index from bottom up    #
# (also known as sorted index build).                                #
# Earlier, the index was created by repeatedly inserting records.    #
# Test scenario:                                                     #
#  - Run bulk create index on 10K rows                               #
#  - Run bulk create index on table with various row types           #
#  - Run DML and SELECT after bulk index creation                    #
# Creation:                                                          #
#  2014-06-19 Implemented this test as part of WL#7277               #
#                                                                    #
######################################################################
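# (Illustration, not part of the original test: the bulk, i.e. sorted,
# index build path is exercised simply by creating secondary indexes on
# an already populated table, e.g. CREATE INDEX idx_id ON t1(id). The
# include file sourced below does exactly that once for each ROW_FORMAT
# value and fill factor.)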

--source include/not_embedded.inc
--source include/innodb_page_size_small.inc
--source include/big_test.inc

# Test Row Format: REDUNDANT.
let $row_format = REDUNDANT;
--source suite/innodb/include/innodb_bulk_create_index.inc

# Test Row Format: COMPACT.
let $row_format = COMPACT;
--source suite/innodb/include/innodb_bulk_create_index.inc

# Test Row Format: DYNAMIC.
let $row_format = DYNAMIC;
--source suite/innodb/include/innodb_bulk_create_index.inc

# Test Row Format: COMPRESSED.
let $row_format = COMPRESSED;
--source suite/innodb/include/innodb_bulk_create_index.inc

# Test Fill Factor: 10
let $row_format = COMPACT;
SET GLOBAL innodb_fill_factor=10;
--source suite/innodb/include/innodb_bulk_create_index.inc

# Test Fill Factor: 50
let $row_format = COMPACT;
SET GLOBAL innodb_fill_factor=50;
--source suite/innodb/include/innodb_bulk_create_index.inc

SET GLOBAL innodb_fill_factor=default;

mysql-test/suite/innodb/t/innodb_bulk_create_index_debug.test  0 → 100644

#
# wl#7277: InnoDB: Bulk Load for Create Index
#
# Test Restart & Crash Recovery.

--source include/big_test.inc
--source include/innodb_page_size_small.inc

# Test Row Format: REDUNDANT.
let $row_format = REDUNDANT;
--source suite/innodb/include/innodb_bulk_create_index_debug.inc

# Test Row Format: COMPACT.
let $row_format = COMPACT;
--source suite/innodb/include/innodb_bulk_create_index_debug.inc

# Test Row Format: DYNAMIC.
let $row_format = DYNAMIC;
--source suite/innodb/include/innodb_bulk_create_index_debug.inc

# Test Row Format: COMPRESSED.
let $row_format = COMPRESSED;
--source suite/innodb/include/innodb_bulk_create_index_debug.inc

mysql-test/suite/innodb/t/innodb_bulk_create_index_flush.test  0 → 100644

#
# Test flush on error in bulk load to make sure we do a proper cleanup.
# Note: We flush all dirty pages before applying any online log in bulk load.
#
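# (Illustration, not part of the original test file: the failures below
# are injected through the debug fault point
# ib_index_build_fail_before_flush. With this merge, row0merge.cc reports
# the injected error as DB_INTERRUPTED, which is why the DDL statements
# below are expected to fail with ER_QUERY_INTERRUPTED while CHECK TABLE
# still reports the table as OK.)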

--source include/have_innodb.inc
--source include/have_debug.inc

# Create Insert Procedure
DELIMITER |;

CREATE PROCEDURE populate_t1()
BEGIN
	DECLARE i int DEFAULT 1;

	START TRANSACTION;
	WHILE (i <= 10000) DO
		INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
		SET i = i + 1;
	END WHILE;
	COMMIT;
END|

DELIMITER ;|

CREATE TABLE t1(
	class	INT,
	id	INT,
	title	VARCHAR(100)
) ENGINE=InnoDB;

--disable_query_log
CALL populate_t1();
--enable_query_log

SELECT COUNT(*) FROM t1;

SET @saved_dbug= @@SESSION.debug_dbug;
SET debug_dbug='+d,ib_index_build_fail_before_flush';

--error ER_QUERY_INTERRUPTED
CREATE INDEX idx_id ON t1(id);

CHECK TABLE t1;

--error ER_QUERY_INTERRUPTED
CREATE INDEX idx_title ON t1(title);

CHECK TABLE t1;

--error ER_QUERY_INTERRUPTED
CREATE FULLTEXT INDEX fidx_title ON t1(title);

CHECK TABLE t1;

--error ER_QUERY_INTERRUPTED
ALTER TABLE t1 ADD COLUMN content TEXT;

CHECK TABLE t1;

SET debug_dbug= @saved_dbug;

INSERT INTO t1 VALUES(10001, 10001, 'a10000');

--error ER_DUP_ENTRY
ALTER TABLE t1 ADD UNIQUE INDEX idx_title(title);

CHECK TABLE t1;

--error ER_DUP_ENTRY
ALTER TABLE t1 ADD UNIQUE INDEX idx_id(id), ADD UNIQUE INDEX idx_title(title);

CHECK TABLE t1;

DROP TABLE t1;

DROP PROCEDURE populate_t1;

mysql-test/suite/innodb/t/innodb_bulk_create_index_replication.test  0 → 100644

######## suite/innodb/t/innodb_wl7277_1.test ##########################
#                                                                     #
# Testcase for worklog WL#7277: InnoDB: Bulk Load for Create Index    #
# The basic idea of bulk load is to build an index from bottom up     #
# (also known as sorted index build).                                 #
# Earlier, the index was created by repeatedly inserting records.     #
# Test scenario:                                                      #
#  - Run bulk create index on a replication setup                     #
#  - Run bulk create index on a partitioned table and see that it is  #
#    replicated to the slave                                          #
# Creation:                                                           #
#  2014-06-19 Implemented this test as part of WL#7277                #
#                                                                     #
#######################################################################

--source include/not_embedded.inc
--source include/have_innodb.inc
--source include/have_partition.inc
--source include/master-slave.inc

--connection master

# Create Insert Procedure
DELIMITER |;

CREATE PROCEDURE populate_t1(load_even INT)
BEGIN
	DECLARE i int DEFAULT 1;

	START TRANSACTION;
	WHILE (i <= 100) DO
		IF i%2 = 0 AND load_even = 1 THEN
			INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
		END IF;
		IF i%2 != 0 AND load_even != 1 THEN
			INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
		END IF;
		SET i = i + 1;
	END WHILE;
	COMMIT;
END|

DELIMITER ;|

CREATE TABLE t1(
	class	INT,
	id	INT,
	title	VARCHAR(100)
) ENGINE=InnoDB;

--disable_query_log
# Load half records
CALL populate_t1(1);
--enable_query_log

SELECT COUNT(*) FROM t1;

/* Create index. */
CREATE INDEX idx_id ON t1(id);

CREATE INDEX idx_title ON t1(title);

/* Select by index. */
EXPLAIN SELECT * FROM t1 WHERE id = 10;
EXPLAIN SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 20;
SELECT * FROM t1 WHERE title = 'a20';

SELECT * FROM t1 WHERE id = 30;
SELECT * FROM t1 WHERE title = 'a30';

SELECT * FROM t1 WHERE id = 101;
SELECT * FROM t1 WHERE title = 'a101';

/*Insert/Update/Delete. */
DELETE FROM t1 WHERE id < 40 AND id > 30;
INSERT INTO t1 VALUES(38, 38, 'b38');
UPDATE t1 SET title = CONCAT('b', id) WHERE id < 30 AND id > 20;

SELECT * FROM t1 WHERE id = 28;
SELECT * FROM t1 WHERE title = 'a28';
SELECT * FROM t1 WHERE title = 'b28';

SELECT * FROM t1 WHERE id = 38;
SELECT * FROM t1 WHERE title = 'a38';
SELECT * FROM t1 WHERE title = 'b38';

SELECT * FROM t1 WHERE id = 101;
SELECT * FROM t1 WHERE title = 'a101';

--disable_query_log
# Load half records (follow up load)
CALL populate_t1(0);
--enable_query_log

SELECT COUNT(*) FROM t1;

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 20;
SELECT * FROM t1 WHERE title = 'a20';

SELECT * FROM t1 WHERE id = 30;
SELECT * FROM t1 WHERE title = 'a30';

SELECT * FROM t1 WHERE id = 101;
SELECT * FROM t1 WHERE title = 'a101';

# Create partition table
CREATE TABLE t_part (
	class INT ,
	id INT ,
	title VARCHAR(30)
) ENGINE=InnoDB
PARTITION BY RANGE(id)
SUBPARTITION BY KEY(id)
SUBPARTITIONS 4
(
	PARTITION p0 VALUES LESS THAN (5000),
	PARTITION p1 VALUES LESS THAN (MAXVALUE)
);

INSERT INTO t_part SELECT * FROM t1;

ALTER TABLE t_part ADD INDEX `idx` (class,id,title(10));

SELECT * FROM t_part WHERE id = 10;
SELECT * FROM t_part WHERE title = 'a10';

SELECT * FROM t_part WHERE id = 20;
SELECT * FROM t_part WHERE title = 'a20';

SELECT * FROM t_part WHERE id = 30;
SELECT * FROM t_part WHERE title = 'a30';

SELECT * FROM t_part WHERE id = 101;
SELECT * FROM t_part WHERE title = 'a101';

--source include/sync_slave_sql_with_master.inc

--connection slave

SHOW CREATE TABLE t1;
SHOW CREATE TABLE t_part;

SELECT COUNT(*) FROM t1;
SELECT COUNT(*) FROM t_part;

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 20;
SELECT * FROM t1 WHERE title = 'a20';

SELECT * FROM t1 WHERE id = 30;
SELECT * FROM t1 WHERE title = 'a30';

SELECT * FROM t1 WHERE id = 101;
SELECT * FROM t1 WHERE title = 'a101';

SELECT * FROM t_part WHERE id = 10;
SELECT * FROM t_part WHERE title = 'a10';

SELECT * FROM t_part WHERE id = 20;
SELECT * FROM t_part WHERE title = 'a20';

SELECT * FROM t_part WHERE id = 30;
SELECT * FROM t_part WHERE title = 'a30';

SELECT * FROM t_part WHERE id = 101;
SELECT * FROM t_part WHERE title = 'a101';

--connection master

DROP PROCEDURE populate_t1;
DROP TABLE t1;
DROP TABLE t_part;

--source include/rpl_end.inc

mysql-test/suite/innodb/t/innodb_bulk_create_index_small.test  0 → 100644

#
# wl#7277: InnoDB: Bulk Load for Create Index
#

--source include/innodb_page_size_small.inc

# Create Insert Procedure
DELIMITER |;

CREATE PROCEDURE populate_t1()
BEGIN
	DECLARE i int DEFAULT 1;

	START TRANSACTION;
	WHILE (i <= 1000) DO
		INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
		SET i = i + 1;
	END WHILE;
	COMMIT;
END|

DELIMITER ;|

SELECT @@innodb_fill_factor;

# Test Compact Table
CREATE TABLE t1(
	class	INT,
	id	INT,
	title	VARCHAR(100)
) ENGINE=InnoDB ROW_FORMAT=COMPACT;

--disable_query_log
CALL populate_t1();
--enable_query_log

SELECT COUNT(*) FROM t1;

/* Create index. */
CREATE INDEX idx_id ON t1(id);

CREATE INDEX idx_title ON t1(title);

/* Check table. */
CHECK TABLE t1;

/* Select by index. */
EXPLAIN SELECT * FROM t1 WHERE id = 10;
EXPLAIN SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 500;
SELECT * FROM t1 WHERE title = 'a500';

SELECT * FROM t1 WHERE id = 1000;
SELECT * FROM t1 WHERE title = 'a1000';

SELECT * FROM t1 WHERE id = 1010;
SELECT * FROM t1 WHERE title = 'a1010';

DROP TABLE t1;

# Test Blob
CREATE TABLE t1(
	a INT PRIMARY KEY,
	b TEXT,
	c TEXT) ENGINE=InnoDB ROW_FORMAT=COMPACT;

INSERT INTO t1 VALUES
(1, REPEAT('a',10000), 'a'),
(2, REPEAT('b',20000), 'b'),
(3, REPEAT('c',40000), 'c'),
(4, REPEAT('d',60000), 'd');

ALTER TABLE t1 DROP COLUMN c;

CHECK TABLE t1;

SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;

DROP TABLE t1;

SET GLOBAL innodb_file_per_table=default;

# Test Compressed Table
SET GLOBAL innodb_file_per_table=1;

CREATE TABLE t1(
	class	INT,
	id	INT,
	title	VARCHAR(100)
) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;

--disable_query_log
CALL populate_t1();
--enable_query_log

SELECT COUNT(*) FROM t1;

/* Create index. */
CREATE INDEX idx_id ON t1(id);

CREATE INDEX idx_title ON t1(title);

/* Check table. */
CHECK TABLE t1;

/* Select by index. */
EXPLAIN SELECT * FROM t1 WHERE id = 10;
EXPLAIN SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE title = 'a10';

SELECT * FROM t1 WHERE id = 500;
SELECT * FROM t1 WHERE title = 'a500';

SELECT * FROM t1 WHERE id = 1000;
SELECT * FROM t1 WHERE title = 'a1000';

SELECT * FROM t1 WHERE id = 1010;
SELECT * FROM t1 WHERE title = 'a1010';

DROP TABLE t1;

# Test Compression & Blob
CREATE TABLE t1(
	a INT PRIMARY KEY,
	b TEXT,
	c TEXT) ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;

INSERT INTO t1 VALUES
(1, REPEAT('a',10000), 'a'),
(2, REPEAT('b',20000), 'b'),
(3, REPEAT('c',40000), 'c'),
(4, REPEAT('d',60000), 'd');

ALTER TABLE t1 DROP COLUMN c;

CHECK TABLE t1;

SELECT CHAR_LENGTH(b) FROM t1 WHERE a=4975;

DROP TABLE t1;

SET GLOBAL innodb_file_per_table=default;

DROP PROCEDURE populate_t1;

mysql-test/suite/innodb/t/truncate_restart.test  0 → 100644

--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc

call mtr.add_suppression("InnoDB: Cannot save table statistics for table `test`\\.`t1`: Persistent statistics do not exist");

SET GLOBAL innodb_stats_persistent= ON;
CREATE TABLE t1 (t TEXT) ENGINE=InnoDB;
--connect (con1,localhost,root,,test)
SET DEBUG_SYNC='ib_trunc_table_trunc_completing SIGNAL committed WAIT_FOR ever';
--send
TRUNCATE TABLE t1;
--connection default
SET DEBUG_SYNC='now WAIT_FOR committed';
--source include/restart_mysqld.inc
--disconnect con1
SELECT COUNT(*) FROM t1;
DROP TABLE t1;

storage/innobase/buf/buf0flu.cc

@@ -3821,16 +3821,14 @@ FlushObserver::notify_remove(
 void
 FlushObserver::flush()
 {
-	ut_ad(m_trx);
-
 	if (!m_interrupted && m_stage) {
 		m_stage->begin_phase_flush(buf_flush_get_dirty_pages_count(
 						   m_space_id, this));
 	}
 
-	buf_LRU_flush_or_remove_pages(m_space_id, m_trx);
+	/* MDEV-14317 FIXME: Discard all changes to only those pages
+	that will be freed by the clean-up of the ALTER operation.
+	(Maybe, instead of buf_pool->flush_list, use a dedicated list
+	for pages on which redo logging has been disabled.) */
+	buf_LRU_flush_or_remove_pages(m_space_id, this);
 
 	/* Wait for all dirty pages were flushed. */
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {

storage/innobase/buf/buf0lru.cc

@@ -542,27 +542,21 @@ buf_flush_or_remove_page(
 	return(processed);
 }
 
-/******************************************************************//**
-Remove all dirty pages belonging to a given tablespace inside a specific
+/** Remove all dirty pages belonging to a given tablespace inside a specific
 buffer pool instance when we are deleting the data file(s) of that
 tablespace. The pages still remain a part of LRU and are evicted from
 the list as they age towards the tail of the LRU.
-@retval DB_SUCCESS if all freed
-@retval DB_FAIL if not all freed
-@retval DB_INTERRUPTED if the transaction was interrupted */
+@param[in,out]	buf_pool	buffer pool
+@param[in]	id		tablespace identifier
+@param[in]	observer	flush observer (to check for interrupt),
+				or NULL if the files should not be written to
+@return whether all dirty pages were freed */
 static	MY_ATTRIBUTE((warn_unused_result))
-dberr_t
+bool
 buf_flush_or_remove_pages(
-/*======================*/
-	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
-	ulint		id,		/*!< in: target space id for which
-					to remove or flush pages */
-	FlushObserver*	observer,	/*!< in: flush observer */
-	bool		flush,		/*!< in: flush to disk if true but
-					don't remove else remove without
-					flushing to disk */
-	const trx_t*	trx)		/*!< to check if the operation must
-					be interrupted, can be 0 */
+	buf_pool_t*	buf_pool,
+	ulint		id,
+	FlushObserver*	observer)
 {
 	buf_page_t*	prev;
 	buf_page_t*	bpage;

@@ -584,15 +578,27 @@ buf_flush_or_remove_pages(
 		prev = UT_LIST_GET_PREV(list, bpage);
 
-		/* If flush observer is NULL, flush page for space id,
-		or flush page for flush observer. */
-		if (observer ? (observer != bpage->flush_observer)
-		    : (id != bpage->id.space())) {
-
-			/* Skip this block. */
-		} else if (!buf_flush_or_remove_page(
-				   buf_pool, bpage, flush)) {
+		/* Flush the pages matching space id,
+		or pages matching the flush observer. */
+		if (observer && observer->is_partial_flush()) {
+			if (observer != bpage->flush_observer) {
+				/* Skip this block, as it does not belong to
+				the target space. */
+			} else if (!buf_flush_or_remove_page(
+					   buf_pool, bpage,
+					   !observer->is_interrupted())) {
+				all_freed = false;
+			} else if (!observer->is_interrupted()) {
+				/* The processing was successful. And during the
+				processing we have released the buf_pool mutex
+				when calling buf_page_flush(). We cannot trust
+				prev pointer. */
+				goto rescan;
+			}
+		} else if (id != bpage->id.space()) {
+			/* Skip this block, because it is for a
+			different tablespace. */
+		} else if (!buf_flush_or_remove_page(
+				   buf_pool, bpage, observer != NULL)) {
 
 			/* Remove was unsuccessful, we have to try again
 			by scanning the entire list from the end.

@@ -615,7 +621,7 @@ buf_flush_or_remove_pages(
 			iteration. */
 
 			all_freed = false;
-		} else if (flush) {
+		} else if (observer) {
 
 			/* The processing was successful. And during the
 			processing we have released the buf_pool mutex

@@ -636,25 +642,14 @@ buf_flush_or_remove_pages(
 		/* The check for trx is interrupted is expensive, we want
 		to check every N iterations. */
-		if (!processed && trx && trx_is_interrupted(trx)) {
-
-			if (trx->flush_observer != NULL) {
-				if (flush) {
-					trx->flush_observer->interrupted();
-				} else {
-					/* We should remove all pages with the
-					the flush observer. */
-					continue;
-				}
-			}
-
-			buf_flush_list_mutex_exit(buf_pool);
-			return(DB_INTERRUPTED);
+		if (!processed && observer) {
+			observer->check_interrupted();
 		}
 	}
 
 	buf_flush_list_mutex_exit(buf_pool);
 
-	return(all_freed ? DB_SUCCESS : DB_FAIL);
+	return(all_freed);
 }
 
 /** Remove or flush all the dirty pages that belong to a given tablespace

@@ -665,73 +660,58 @@ the tail of the LRU list.
 @param[in]	id		tablespace identifier
 @param[in]	observer	flush observer,
 				or NULL if the files should not be written to
-@param[in]	trx		transaction (to check for interrupt),
-				or NULL if the files should not be written to
 */
 static
 void
 buf_flush_dirty_pages(
 	buf_pool_t*	buf_pool,
 	ulint		id,
-	FlushObserver*	observer,
-	const trx_t*	trx)
+	FlushObserver*	observer)
 {
-	dberr_t		err;
-	bool		flush = trx != NULL;
-
-	do {
+	for (;;) {
 		buf_pool_mutex_enter(buf_pool);
-
-		err = buf_flush_or_remove_pages(
-			buf_pool, id, observer, flush, trx);
-
+		bool freed = buf_flush_or_remove_pages(buf_pool, id, observer);
 		buf_pool_mutex_exit(buf_pool);
-
 		ut_ad(buf_flush_validate(buf_pool));
 
-		if (err == DB_FAIL) {
-			os_thread_sleep(2000);
+		if (freed) {
+			break;
 		}
 
-		if (err == DB_INTERRUPTED && observer != NULL) {
-			ut_a(flush);
-
-			flush = false;
-			err = DB_FAIL;
-		}
-
-		/* DB_FAIL is a soft error, it means that the task wasn't
-		completed, needs to be retried. */
+		os_thread_sleep(2000);
 
 		ut_ad(buf_flush_validate(buf_pool));
+	}
 
-	} while (err == DB_FAIL);
-
-	ut_ad(err == DB_INTERRUPTED
+	ut_ad((observer && observer->is_interrupted())
 	      || buf_pool_get_dirty_pages_count(buf_pool, id, observer) == 0);
 }
 
 /** Empty the flush list for all pages belonging to a tablespace.
 @param[in]	id		tablespace identifier
-@param[in]	trx		transaction, for checking for user interrupt;
+@param[in,out]	observer	flush observer,
 				or NULL if nothing is to be written
 @param[in]	drop_ahi	whether to drop the adaptive hash index */
 void
-buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx, bool drop_ahi)
+buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer, bool drop_ahi)
 {
-	FlushObserver*	observer = (trx == NULL) ? NULL : trx->flush_observer;
 	/* Pages in the system tablespace must never be discarded. */
-	ut_ad(id || trx);
+	ut_ad(id || observer);
 
 	for (ulint i = 0; i < srv_buf_pool_instances; i++) {
 		buf_pool_t*	buf_pool = buf_pool_from_array(i);
 		if (drop_ahi) {
 			buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
 		}
-		buf_flush_dirty_pages(buf_pool, id, observer, trx);
+		buf_flush_dirty_pages(buf_pool, id, observer);
 	}
 
-	if (trx && !observer && !trx_is_interrupted(trx)) {
+	if (observer && !observer->is_interrupted()) {
 		/* Ensure that all asynchronous IO is completed. */
 		os_aio_wait_until_no_pending_writes();
 		fil_flush(id);

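For orientation, a minimal caller-side sketch of the FlushObserver-based interface introduced above (an illustration, not part of the commit); it assumes a tablespace id and a trx_t* are in scope and mirrors the fil0fil.cc, row0import.cc and row0quiesce.cc hunks that follow:

	/* Sketch: flush or remove every dirty page of one tablespace and
	detect interruption through a stack-allocated FlushObserver. */
	{
		FlushObserver	observer(space_id, trx, NULL);
		buf_LRU_flush_or_remove_pages(space_id, &observer);

		if (observer.is_interrupted()) {
			/* The operation was interrupted; callers such as
			row_import_for_mysql() return DB_INTERRUPTED here. */
		}
	}
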
storage/innobase/dict/dict0stats.cc

@@ -293,7 +293,10 @@ dict_stats_exec_sql(
 	ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
 	ut_ad(mutex_own(&dict_sys->mutex));
 
-	if (!dict_stats_persistent_storage_check(true)) {
+	extern bool dict_stats_start_shutdown;
+
+	if (dict_stats_start_shutdown
+	    || !dict_stats_persistent_storage_check(true)) {
 		pars_info_free(pinfo);
 		return(DB_STATS_DO_NOT_EXIST);
 	}

storage/innobase/fil/fil0fil.cc

@@ -2907,7 +2907,10 @@ fil_close_tablespace(
 	completely and permanently. The flag stop_new_ops also prevents
 	fil_flush() from being applied to this tablespace. */
 
-	buf_LRU_flush_or_remove_pages(id, trx);
+	{
+		FlushObserver observer(id, trx, NULL);
+		buf_LRU_flush_or_remove_pages(id, &observer);
+	}
 
 	/* If the free is successful, the X lock will be released before
 	the space memory data structure is freed. */

storage/innobase/include/buf0flu.h

@@ -363,6 +363,12 @@ class FlushObserver {
 		       || m_interrupted);
 	}
 
+	/** @return whether to flush only some pages of the tablespace */
+	bool is_partial_flush() const { return m_stage != NULL; }
+
+	/** @return whether the operation was interrupted */
+	bool is_interrupted() const { return m_interrupted; }
+
 	/** Interrupt observer not to wait. */
 	void interrupted()
 	{

@@ -375,7 +381,6 @@ class FlushObserver {
 	/** Flush dirty pages. */
 	void flush();
-
 	/** Notify observer of flushing a page
 	@param[in]	buf_pool	buffer pool instance
 	@param[in]	bpage		buffer page to flush */

@@ -391,10 +396,10 @@ class FlushObserver {
 				buf_page_t*	bpage);
 private:
 	/** Table space id */
-	ulint			m_space_id;
+	const ulint		m_space_id;
 
 	/** Trx instance */
-	trx_t*			m_trx;
+	trx_t* const		m_trx;
 
 	/** Performance schema accounting object, used by ALTER TABLE.
 	If not NULL, then stage->begin_phase_flush() will be called initially,

storage/innobase/include/buf0lru.h

@@ -52,12 +52,14 @@ These are low-level functions
 /** Empty the flush list for all pages belonging to a tablespace.
 @param[in]	id		tablespace identifier
-@param[in]	trx		transaction, for checking for user interrupt;
-				or NULL if nothing is to be written
+@param[in,out]	observer	flush observer,
+				or NULL if nothing is to be written
 @param[in]	drop_ahi	whether to drop the adaptive hash index */
+UNIV_INTERN
 void
-buf_LRU_flush_or_remove_pages(ulint id, const trx_t* trx, bool drop_ahi = false);
+buf_LRU_flush_or_remove_pages(ulint id, FlushObserver* observer,
+			      bool drop_ahi = false);
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /********************************************************************//**

storage/innobase/row/row0import.cc

@@ -3653,11 +3653,16 @@ row_import_for_mysql(
 	The only dirty pages generated should be from the pessimistic purge
 	of delete marked records that couldn't be purged in Phase I. */
 
-	buf_LRU_flush_or_remove_pages(prebuilt->table->space, trx);
+	{
+		FlushObserver observer(prebuilt->table->space, trx, NULL);
+		buf_LRU_flush_or_remove_pages(prebuilt->table->space,
+					      &observer);
 
-	if (trx_is_interrupted(trx)) {
-		ib::info() << "Phase III - Flush interrupted";
-		return(row_import_error(prebuilt, trx, DB_INTERRUPTED));
+		if (observer.is_interrupted()) {
+			ib::info() << "Phase III - Flush interrupted";
+			return(row_import_error(prebuilt, trx,
+						DB_INTERRUPTED));
+		}
 	}
 
 	ib::info() << "Phase IV - Flush complete";

storage/innobase/row/row0merge.cc

@@ -5024,7 +5024,7 @@ row_merge_build_indexes(
 	ut_ad(need_flush_observer);
 
 	DBUG_EXECUTE_IF("ib_index_build_fail_before_flush",
-			error = DB_FAIL;
+			error = DB_INTERRUPTED;
 	);
 
 	if (error != DB_SUCCESS) {

storage/innobase/row/row0quiesce.cc

@@ -535,7 +535,10 @@ row_quiesce_table_start(
 	}
 
 	if (!trx_is_interrupted(trx)) {
-		buf_LRU_flush_or_remove_pages(table->space, trx);
+		{
+			FlushObserver observer(table->space, trx, NULL);
+			buf_LRU_flush_or_remove_pages(table->space, &observer);
+		}
 
 		if (trx_is_interrupted(trx)) {

storage/innobase/srv/srv0start.cc

@@ -1100,22 +1100,19 @@ srv_undo_tablespaces_init(bool create_new_db)
 		mtr_commit(&mtr);
 
 		/* Step-2: Flush the dirty pages from the buffer pool. */
-		trx_t* trx = trx_allocate_for_background();
-
 		for (undo::undo_spaces_t::const_iterator it
 			     = undo::Truncate::s_fix_up_spaces.begin();
 		     it != undo::Truncate::s_fix_up_spaces.end();
 		     ++it) {
-
-			buf_LRU_flush_or_remove_pages(TRX_SYS_SPACE, trx);
-
-			buf_LRU_flush_or_remove_pages(*it, trx);
+			FlushObserver dummy(TRX_SYS_SPACE, NULL, NULL);
+			buf_LRU_flush_or_remove_pages(TRX_SYS_SPACE, &dummy);
+			FlushObserver dummy2(*it, NULL, NULL);
+			buf_LRU_flush_or_remove_pages(*it, &dummy2);
 
 			/* Remove the truncate redo log file. */
 			undo::Truncate	undo_trunc;
 			undo_trunc.done_logging(*it);
 		}
-
-		trx_free_for_background(trx);
 	}
 
 	return(DB_SUCCESS);
