Commit dc588e3d authored by Alexander Barkov

Merge remote-tracking branch 'origin/10.3' into 10.4

parents f1dcbc2d 7e44c455
......@@ -951,6 +951,18 @@ SELECT json_valid('{"test": "\\ud83d\\ude0b"}');
json_valid('{"test": "\\ud83d\\ude0b"}')
1
#
# MDEV-19670 json escaped unicode parse error
#
SELECT JSON_VALID('{"admin\\"": null}'), '{"admin\\"": null}'
UNION
SELECT JSON_VALID('{"\\"admin": null}'), '{"\\"admin": null}'
UNION
SELECT JSON_VALID('{"\\"": null}'), '{"\\"": null}';
JSON_VALID('{"admin\\"": null}') {"admin\"": null}
1 {"admin\"": null}
1 {"\"admin": null}
1 {"\"": null}
#
# End of 10.3 tests
#
#
......
......@@ -568,6 +568,16 @@ SELECT
SELECT json_valid('{"value":"\\ud83d\\ude0a"}');
SELECT json_valid('{"test": "\\ud83d\\ude0b"}');
--echo #
--echo # MDEV-19670 json escaped unicode parse error
--echo #
SELECT JSON_VALID('{"admin\\"": null}'), '{"admin\\"": null}'
UNION
SELECT JSON_VALID('{"\\"admin": null}'), '{"\\"admin": null}'
UNION
SELECT JSON_VALID('{"\\"": null}'), '{"\\"": null}';
--echo #
--echo # End of 10.3 tests
--echo #
......
......@@ -2682,3 +2682,26 @@ myisam_block_size 1024
select @@global.myisam_block_size;
@@global.myisam_block_size
1024
#
# MDEV-20704 An index on a double column erroneously uses prefix compression
#
CREATE TABLE t1 (
id INT NOT NULL PRIMARY KEY,
d DOUBLE,
KEY (d)
) ENGINE=MyISAM;
MyISAM file: MYSQLD_DATADIR/test/t1
Record format: Fixed length
Character set: latin1_swedish_ci (8)
Data records: 0 Deleted blocks: 0
Recordlength: 13
table description:
Key Start Len Index Type
1 2 4 unique long
2 6 8 multip. double NULL
DROP TABLE t1;
#
# End of 5.5 tests
#
......@@ -1810,3 +1810,22 @@ drop table t1;
#
show variables like 'myisam_block_size';
select @@global.myisam_block_size;
--echo #
--echo # MDEV-20704 An index on a double column erroneously uses prefix compression
--echo #
CREATE TABLE t1 (
id INT NOT NULL PRIMARY KEY,
d DOUBLE,
KEY (d)
) ENGINE=MyISAM;
let $MYSQLD_DATADIR= `select @@datadir`;
--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
--exec $MYISAMCHK -d $MYSQLD_DATADIR/test/t1
DROP TABLE t1;
--echo #
--echo # End of 5.5 tests
--echo #
......@@ -58,7 +58,7 @@ set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_orderkey,
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) Using index condition; Using where; Using rowid filter
1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) Using index condition; Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45;
......@@ -78,11 +78,11 @@ EXPLAIN
"key": "i_l_quantity",
"used_key_parts": ["l_quantity"]
},
"rows": 662,
"selectivity_pct": 11.024
"rows": 702,
"selectivity_pct": 11.69
},
"rows": 509,
"filtered": 11.024,
"filtered": 11.69,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
}
......@@ -92,7 +92,7 @@ set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_orderkey,
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) 60.00 (3%) 11.02 100.00 Using index condition; Using where; Using rowid filter
1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) 60.00 (3%) 11.69 100.00 Using index condition; Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45;
......@@ -114,8 +114,8 @@ ANALYZE
"key": "i_l_quantity",
"used_key_parts": ["l_quantity"]
},
"rows": 662,
"selectivity_pct": 11.024,
"rows": 702,
"selectivity_pct": 11.69,
"r_rows": 605,
"r_selectivity_pct": 3.6855,
"r_buffer_size": "REPLACED",
......@@ -125,7 +125,7 @@ ANALYZE
"rows": 509,
"r_rows": 60,
"r_total_time_ms": "REPLACED",
"filtered": 11.024,
"filtered": 11.69,
"r_filtered": 100,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
......@@ -216,7 +216,7 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
"rows": 509,
"filtered": 11.024,
"filtered": 11.69,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
}
......@@ -226,7 +226,7 @@ set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_orderkey,
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.02 11.76 Using index condition; Using where
1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.69 11.76 Using index condition; Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45;
......@@ -247,7 +247,7 @@ ANALYZE
"rows": 509,
"r_rows": 510,
"r_total_time_ms": "REPLACED",
"filtered": 11.024,
"filtered": 11.69,
"r_filtered": 11.765,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
......@@ -362,11 +362,11 @@ EXPLAIN
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 81,
"selectivity_pct": 5.4
"rows": 69,
"selectivity_pct": 4.6
},
"rows": 1,
"filtered": 5.4,
"filtered": 4.6,
"attached_condition": "orders.o_totalprice between 200000 and 230000"
}
}
......@@ -377,7 +377,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 5.40 100.00 Using where; Using rowid filter
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 4.60 100.00 Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
......@@ -421,8 +421,8 @@ ANALYZE
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 81,
"selectivity_pct": 5.4,
"rows": 69,
"selectivity_pct": 4.6,
"r_rows": 71,
"r_selectivity_pct": 10.417,
"r_buffer_size": "REPLACED",
......@@ -432,7 +432,7 @@ ANALYZE
"rows": 1,
"r_rows": 0.1122,
"r_total_time_ms": "REPLACED",
"filtered": 5.4,
"filtered": 4.6,
"r_filtered": 100,
"attached_condition": "orders.o_totalprice between 200000 and 230000"
}
......@@ -494,7 +494,7 @@ EXPLAIN
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
"rows": 1,
"filtered": 5.4,
"filtered": 4.6,
"attached_condition": "orders.o_totalprice between 200000 and 230000"
}
}
......@@ -505,7 +505,7 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 5.40 11.22 Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 4.60 11.22 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
......@@ -548,7 +548,7 @@ ANALYZE
"rows": 1,
"r_rows": 1,
"r_total_time_ms": "REPLACED",
"filtered": 5.4,
"filtered": 4.6,
"r_filtered": 11.224,
"attached_condition": "orders.o_totalprice between 200000 and 230000"
}
......@@ -576,8 +576,8 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (10%) Using where; Using rowid filter
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
......@@ -605,11 +605,11 @@ EXPLAIN
"key": "i_l_quantity",
"used_key_parts": ["l_quantity"]
},
"rows": 662,
"selectivity_pct": 11.024
"rows": 702,
"selectivity_pct": 11.69
},
"rows": 509,
"filtered": 11.024,
"filtered": 11.69,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
},
......@@ -626,11 +626,11 @@ EXPLAIN
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 152,
"selectivity_pct": 10.133
"rows": 139,
"selectivity_pct": 9.2667
},
"rows": 1,
"filtered": 10.133,
"filtered": 9.2667,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
}
......@@ -641,8 +641,8 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) 60.00 (3%) 11.02 100.00 Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (10%) 0.27 (25%) 10.13 100.00 Using where; Using rowid filter
1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (12%) 60.00 (3%) 11.69 100.00 Using index condition; Using where; Using rowid filter
1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (9%) 0.27 (25%) 9.27 100.00 Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
......@@ -672,8 +672,8 @@ ANALYZE
"key": "i_l_quantity",
"used_key_parts": ["l_quantity"]
},
"rows": 662,
"selectivity_pct": 11.024,
"rows": 702,
"selectivity_pct": 11.69,
"r_rows": 605,
"r_selectivity_pct": 3.6855,
"r_buffer_size": "REPLACED",
......@@ -683,7 +683,7 @@ ANALYZE
"rows": 509,
"r_rows": 60,
"r_total_time_ms": "REPLACED",
"filtered": 11.024,
"filtered": 11.69,
"r_filtered": 100,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
......@@ -701,8 +701,8 @@ ANALYZE
"key": "i_o_totalprice",
"used_key_parts": ["o_totalprice"]
},
"rows": 152,
"selectivity_pct": 10.133,
"rows": 139,
"selectivity_pct": 9.2667,
"r_rows": 144,
"r_selectivity_pct": 25.424,
"r_buffer_size": "REPLACED",
......@@ -712,7 +712,7 @@ ANALYZE
"rows": 1,
"r_rows": 0.2667,
"r_total_time_ms": "REPLACED",
"filtered": 10.133,
"filtered": 9.2667,
"r_filtered": 100,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
......@@ -771,7 +771,7 @@ EXPLAIN
"key_length": "4",
"used_key_parts": ["l_shipDATE"],
"rows": 509,
"filtered": 11.024,
"filtered": 11.69,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
},
......@@ -784,7 +784,7 @@ EXPLAIN
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
"rows": 1,
"filtered": 10.133,
"filtered": 9.2667,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
}
......@@ -795,8 +795,8 @@ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
l_quantity > 45 AND
o_totalprice between 180000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.02 11.76 Using index condition; Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 10.13 26.67 Using where
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.69 11.76 Using index condition; Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 9.27 26.67 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
......@@ -825,7 +825,7 @@ ANALYZE
"rows": 509,
"r_rows": 510,
"r_total_time_ms": "REPLACED",
"filtered": 11.024,
"filtered": 11.69,
"r_filtered": 11.765,
"index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
"attached_condition": "lineitem.l_quantity > 45"
......@@ -842,7 +842,7 @@ ANALYZE
"rows": 1,
"r_rows": 1,
"r_total_time_ms": "REPLACED",
"filtered": 10.133,
"filtered": 9.2667,
"r_filtered": 26.667,
"attached_condition": "orders.o_totalprice between 180000 and 230000"
}
......@@ -875,7 +875,7 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 Using index condition
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 Using index condition
1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
......@@ -892,7 +892,7 @@ EXPLAIN
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
"rows": 81,
"rows": 69,
"filtered": 100,
"index_condition": "orders.o_totalprice between 200000 and 230000"
},
......@@ -928,7 +928,7 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 71.00 100.00 100.00 Using index condition
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 71.00 100.00 100.00 Using index condition
1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) 0.52 (7%) 8.48 100.00 Using where; Using rowid filter
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
......@@ -948,7 +948,7 @@ ANALYZE
"key_length": "9",
"used_key_parts": ["o_totalprice"],
"r_loops": 1,
"rows": 81,
"rows": 69,
"r_rows": 71,
"r_total_time_ms": "REPLACED",
"filtered": 100,
......@@ -1037,7 +1037,7 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 Using index condition
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 Using index condition
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
......@@ -1054,7 +1054,7 @@ EXPLAIN
"key": "i_o_totalprice",
"key_length": "9",
"used_key_parts": ["o_totalprice"],
"rows": 81,
"rows": 69,
"filtered": 100,
"index_condition": "orders.o_totalprice between 200000 and 230000"
},
......@@ -1082,7 +1082,7 @@ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
o_totalprice between 200000 and 230000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 71.00 100.00 100.00 Using index condition
1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 69 71.00 100.00 100.00 Using index condition
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.70 8.48 7.77 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
FROM orders JOIN lineitem ON o_orderkey=l_orderkey
......@@ -1102,7 +1102,7 @@ ANALYZE
"key_length": "9",
"used_key_parts": ["o_totalprice"],
"r_loops": 1,
"rows": 81,
"rows": 69,
"r_rows": 71,
"r_total_time_ms": "REPLACED",
"filtered": 100,
......@@ -1227,7 +1227,7 @@ EXPLAIN
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
"rows": 1,
"filtered": 8.7333,
"filtered": 7.4667,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
}
......@@ -1240,7 +1240,7 @@ l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
o_totalprice BETWEEN 200000 AND 250000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 18.00 0.57 38.89 Using index condition; Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 8.73 14.29 Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 7.47 14.29 Using where
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
FROM orders, lineitem
WHERE o_orderkey=l_orderkey AND
......@@ -1287,7 +1287,7 @@ ANALYZE
"rows": 1,
"r_rows": 1,
"r_total_time_ms": "REPLACED",
"filtered": 8.7333,
"filtered": 7.4667,
"r_filtered": 14.286,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
......@@ -1347,7 +1347,7 @@ EXPLAIN
"used_key_parts": ["o_orderkey"],
"ref": ["dbt3_s001.lineitem.l_orderkey"],
"rows": 1,
"filtered": 8.7333,
"filtered": 7.4667,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
}
......@@ -1360,7 +1360,7 @@ l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
o_totalprice BETWEEN 200000 AND 250000;
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 18.00 0.57 38.89 Using index condition; Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 8.73 14.29 Using where
1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 7.47 14.29 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
FROM orders, lineitem
WHERE o_orderkey=l_orderkey AND
......@@ -1407,7 +1407,7 @@ ANALYZE
"rows": 1,
"r_rows": 1,
"r_total_time_ms": "REPLACED",
"filtered": 8.7333,
"filtered": 7.4667,
"r_filtered": 14.286,
"attached_condition": "orders.o_totalprice between 200000 and 250000"
}
......@@ -1455,7 +1455,7 @@ EXPLAIN
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
"rows": 39,
"filtered": 3.2667,
"filtered": 3.2,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
},
......@@ -1485,7 +1485,7 @@ o_totaldiscount BETWEEN 18000 AND 20000 AND
o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.27 2.44 Using index condition; Using where
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.20 2.44 Using index condition; Using where
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM orders, lineitem
......@@ -1510,7 +1510,7 @@ ANALYZE
"rows": 39,
"r_rows": 41,
"r_total_time_ms": "REPLACED",
"filtered": 3.2667,
"filtered": 3.2,
"r_filtered": 2.439,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
......@@ -1576,7 +1576,7 @@ EXPLAIN
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
"rows": 39,
"filtered": 3.2667,
"filtered": 3.2,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
},
......@@ -1606,7 +1606,7 @@ o_totaldiscount BETWEEN 18000 AND 20000 AND
o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.27 2.44 Using index condition; Using where
1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.20 2.44 Using index condition; Using where
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM orders, lineitem
......@@ -1631,7 +1631,7 @@ ANALYZE
"rows": 39,
"r_rows": 41,
"r_total_time_ms": "REPLACED",
"filtered": 3.2667,
"filtered": 3.2,
"r_filtered": 2.439,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000"
......@@ -1705,7 +1705,7 @@ EXPLAIN
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
"rows": 39,
"filtered": 1.9905,
"filtered": 1.9499,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
},
......@@ -1735,7 +1735,7 @@ o_totaldiscount BETWEEN 18000 AND 20000 AND
o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 1.99 2.44 Using index condition; Using where
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 1.95 2.44 Using index condition; Using where
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM v1, lineitem
......@@ -1765,7 +1765,7 @@ ANALYZE
"rows": 39,
"r_rows": 41,
"r_total_time_ms": "REPLACED",
"filtered": 1.9905,
"filtered": 1.9499,
"r_filtered": 2.439,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
......@@ -1836,7 +1836,7 @@ EXPLAIN
"key_length": "9",
"used_key_parts": ["o_totaldiscount"],
"rows": 39,
"filtered": 1.9905,
"filtered": 1.9499,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
},
......@@ -1866,7 +1866,7 @@ o_totaldiscount BETWEEN 18000 AND 20000 AND
o_totalprice BETWEEN 200000 AND 220000 AND
l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 1.99 2.44 Using index condition; Using where
1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 1.95 2.44 Using index condition; Using where
1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
FROM v1, lineitem
......@@ -1896,7 +1896,7 @@ ANALYZE
"rows": 39,
"r_rows": 41,
"r_total_time_ms": "REPLACED",
"filtered": 1.9905,
"filtered": 1.9499,
"r_filtered": 2.439,
"index_condition": "orders.o_totaldiscount between 18000 and 20000",
"attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
......
......@@ -335,9 +335,9 @@ from part, lineitem, orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
and o_orderkey=l_orderkey and p_partkey=l_partkey;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE part range PRIMARY,i_p_retailprice i_p_retailprice 9 NULL 1 Using index condition
1 SIMPLE orders ref PRIMARY,i_o_orderdate i_o_orderdate 4 const 1
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
1 SIMPLE part eq_ref|filter PRIMARY,i_p_retailprice PRIMARY|i_p_retailprice 4|9 dbt3_s001.lineitem.l_partkey 1 (1%) Using where; Using rowid filter
select o_orderkey, p_partkey
from part, lineitem, orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
......
include/master-slave.inc
[connection master]
connection server_2;
include/stop_slave.inc
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL slave_parallel_mode='optimistic';
SET GLOBAL slave_parallel_threads= 3;
CHANGE MASTER TO master_use_gtid=slave_pos;
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
include/start_slave.inc
connection server_1;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
include/save_master_gtid.inc
connection server_2;
include/sync_with_master_gtid.inc
connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,;
BEGIN;
INSERT INTO t1 VALUES (32);
connection server_1;
INSERT INTO t1 VALUES (32);
connection server_2;
SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker";
connection server_1;
SET gtid_seq_no=100;
INSERT INTO t1 VALUES (33);
connection server_2;
SET debug_sync='now WAIT_FOR reached_pause';
connection server_1;
INSERT INTO t1 VALUES (34);
connection server_2;
connection con_temp2;
COMMIT;
connection server_2;
include/stop_slave.inc
include/assert.inc [table t1 should have zero rows where a>32]
SELECT * FROM t1 WHERE a>32;
a
DELETE FROM t1 WHERE a=32;
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
SET GLOBAL slave_parallel_mode=@old_parallel_mode;
SET GLOBAL debug_dbug=@old_debug;
SET DEBUG_SYNC= 'RESET';
include/start_slave.inc
connection server_1;
DROP TABLE t1;
include/rpl_end.inc
--source suite/rpl/include/rpl_parallel_ignored_errors.inc
# ==== Purpose ====
#
# This test verifies that, in parallel replication, a transaction failure
# notification is propagated to all the workers. Workers should abort the
# execution of transaction event groups whose event positions are higher
# than that of the failing transaction group.
#
# ==== Implementation ====
#
# Steps:
# 0 - Create a table t1 on master which has a primary key. Enable parallel
# replication on slave with slave_parallel_mode='optimistic' and
# slave_parallel_threads=3.
# 1 - On slave start a transaction and execute a local INSERT statement
# which will insert value 32. This is done to block the INSERT coming
# from master.
# 2 - On master execute an INSERT statement with value 32, so that it is
# blocked on slave.
# 3 - On slave enable a debug sync point such that it holds the worker thread
# execution as soon as work is scheduled to it.
# 4 - INSERT value 33 on master. It will be held on slave by another worker
# thread due to the debug simulation.
# 5 - INSERT value 34 on master.
# 6 - On slave, ensure that INSERT 34 has reached a state where it waits for
# its prior transactions to commit.
# 7 - Commit the local INSERT 32 on the slave server so that the first
# worker will error out.
# 8 - Now send a continue signal to the second worker processing 33. It
# should wake up and propagate the error to INSERT 34.
# 9 - Upon slave stop due to error, check that no rows are found after the
# failed INSERT 32.
#
# ==== References ====
#
# MDEV-20645: Replication consistency is broken as workers miss the error
# notification from an earlier failed group.
#
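#
# Illustrative only (not part of the recorded test): the debug_sync
# handshakes in steps 3-8 follow this general pattern, with "sig" as a
# placeholder signal name:
#   SET debug_sync= 'now WAIT_FOR sig';   # blocks this session
#   SET debug_sync= 'now SIGNAL sig';     # run elsewhere; releases the waiter
#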
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--source include/have_binlog_format_statement.inc
--source include/master-slave.inc
--enable_connect_log
--connection server_2
--source include/stop_slave.inc
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL slave_parallel_mode='optimistic';
SET GLOBAL slave_parallel_threads= 3;
CHANGE MASTER TO master_use_gtid=slave_pos;
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
--source include/start_slave.inc
--connection server_1
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
--source include/save_master_gtid.inc
--connection server_2
--source include/sync_with_master_gtid.inc
--connect (con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,)
BEGIN;
INSERT INTO t1 VALUES (32);
--connection server_1
INSERT INTO t1 VALUES (32);
--connection server_2
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE info like "INSERT INTO t1 VALUES (32)"
--source include/wait_condition.inc
SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker";
--connection server_1
SET gtid_seq_no=100;
INSERT INTO t1 VALUES (33);
--connection server_2
SET debug_sync='now WAIT_FOR reached_pause';
--connection server_1
INSERT INTO t1 VALUES (34);
--connection server_2
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state like "Waiting for prior transaction to commit"
--source include/wait_condition.inc
--connection con_temp2
COMMIT;
# Clean up.
--connection server_2
--source include/stop_slave.inc
--let $assert_cond= COUNT(*) = 0 FROM t1 WHERE a>32
--let $assert_text= table t1 should have zero rows where a>32
--source include/assert.inc
SELECT * FROM t1 WHERE a>32;
DELETE FROM t1 WHERE a=32;
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
SET GLOBAL slave_parallel_mode=@old_parallel_mode;
SET GLOBAL debug_dbug=@old_debug;
SET DEBUG_SYNC= 'RESET';
--source include/start_slave.inc
--connection server_1
DROP TABLE t1;
--disable_connect_log
--source include/rpl_end.inc
include/master-slave.inc
[connection master]
connection server_2;
include/stop_slave.inc
SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL slave_parallel_mode='optimistic';
SET GLOBAL slave_parallel_threads= 3;
CHANGE MASTER TO master_use_gtid=slave_pos;
CALL mtr.add_suppression("Commit failed due to failure of an earlier commit on which this one depends");
include/start_slave.inc
connection server_1;
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
CREATE TABLE t1 (a int PRIMARY KEY) ENGINE=InnoDB;
include/save_master_gtid.inc
connection server_2;
include/sync_with_master_gtid.inc
connect con_temp2,127.0.0.1,root,,test,$SERVER_MYPORT_2,;
BEGIN;
INSERT INTO t1 VALUES (32);
connection server_1;
INSERT INTO t1 VALUES (32);
connection server_2;
SET GLOBAL debug_dbug="+d,hold_worker_on_schedule";
SET debug_sync="debug_sync_action SIGNAL reached_pause WAIT_FOR continue_worker";
connection server_1;
SET gtid_seq_no=100;
INSERT INTO t1 VALUES (33);
connection server_2;
SET debug_sync='now WAIT_FOR reached_pause';
connection server_1;
INSERT INTO t1 VALUES (34);
connection server_2;
connection con_temp2;
COMMIT;
connection server_2;
include/stop_slave.inc
include/assert.inc [table t1 should have zero rows where a>32]
SELECT * FROM t1 WHERE a>32;
a
DELETE FROM t1 WHERE a=32;
SET GLOBAL slave_parallel_threads=@old_parallel_threads;
SET GLOBAL slave_parallel_mode=@old_parallel_mode;
SET GLOBAL debug_dbug=@old_debug;
SET DEBUG_SYNC= 'RESET';
include/start_slave.inc
connection server_1;
DROP TABLE t1;
include/rpl_end.inc
--source include/rpl_parallel_ignored_errors.inc
......@@ -368,7 +368,6 @@ sub start_mysqlds()
print "wanted mysqld binary.\n\n";
$info_sent= 1;
}
$com.= $tmp;
if (!$suffix_found)
{
......@@ -376,6 +375,8 @@ sub start_mysqlds()
$com.= $groups[$i];
}
$com.= $tmp;
if ($opt_wsrep_new_cluster) {
$com.= " --wsrep-new-cluster";
}
......
......@@ -1139,12 +1139,9 @@ PSI_statement_info stmt_info_new_packet;
#endif
#ifndef EMBEDDED_LIBRARY
void net_before_header_psi(struct st_net *net, void *user_data, size_t /* unused: count */)
void net_before_header_psi(struct st_net *net, void *thd, size_t /* unused: count */)
{
THD *thd;
thd= static_cast<THD*> (user_data);
DBUG_ASSERT(thd != NULL);
DBUG_ASSERT(thd);
/*
We only come here when the server is IDLE, waiting for the next command.
Technically, it is a wait on a socket, which may take a long time,
......@@ -1153,7 +1150,8 @@ void net_before_header_psi(struct st_net *net, void *user_data, size_t /* unused
Instead, start explicitly an IDLE event.
*/
MYSQL_SOCKET_SET_STATE(net->vio->mysql_socket, PSI_SOCKET_STATE_IDLE);
MYSQL_START_IDLE_WAIT(thd->m_idle_psi, &thd->m_idle_state);
MYSQL_START_IDLE_WAIT(static_cast<THD*>(thd)->m_idle_psi,
&static_cast<THD*>(thd)->m_idle_state);
}
void net_after_header_psi(struct st_net *net, void *user_data,
......
......@@ -228,6 +228,12 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
entry->stop_on_error_sub_id == (uint64)ULONGLONG_MAX)
entry->stop_on_error_sub_id= sub_id;
mysql_mutex_unlock(&entry->LOCK_parallel_entry);
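/*
  Debug-only hook: once an error has been recorded for this parallel
  entry, release a worker held by the "hold_worker_on_schedule" sync
  point so it can observe the failure.
*/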
DBUG_EXECUTE_IF("hold_worker_on_schedule", {
if (entry->stop_on_error_sub_id < (uint64)ULONGLONG_MAX)
{
debug_sync_set_action(thd, STRING_WITH_LEN("now SIGNAL continue_worker"));
}
});
DBUG_EXECUTE_IF("rpl_parallel_simulate_wait_at_retry", {
if (rgi->current_gtid.seq_no == 1000) {
......@@ -1136,6 +1142,13 @@ handle_rpl_parallel_thread(void *arg)
bool did_enter_cond= false;
PSI_stage_info old_stage;
DBUG_EXECUTE_IF("hold_worker_on_schedule", {
if (rgi->current_gtid.domain_id == 0 &&
rgi->current_gtid.seq_no == 100) {
debug_sync_set_action(thd,
STRING_WITH_LEN("now SIGNAL reached_pause WAIT_FOR continue_worker"));
}
});
DBUG_EXECUTE_IF("rpl_parallel_scheduled_gtid_0_x_100", {
if (rgi->current_gtid.domain_id == 0 &&
rgi->current_gtid.seq_no == 100) {
......@@ -1177,7 +1190,10 @@ handle_rpl_parallel_thread(void *arg)
skip_event_group= do_gco_wait(rgi, gco, &did_enter_cond, &old_stage);
if (unlikely(entry->stop_on_error_sub_id <= rgi->wait_commit_sub_id))
{
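/*
  A prior transaction that this event group depends on has failed;
  skip the group and flag the error on this worker as well.
*/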
skip_event_group= true;
rgi->worker_error= 1;
}
if (likely(!skip_event_group))
do_ftwrl_wait(rgi, &did_enter_cond, &old_stage);
......
/* Copyright (c) 2007, 2013, Oracle and/or its affiliates.
Copyright (c) 2008, 2014, SkySQL Ab.
Copyright (c) 2008, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -319,7 +319,7 @@ unpack_row(rpl_group_info *rgi,
normal unpack operation.
*/
uint16 const metadata= tabledef->field_metadata(i);
uchar const *const old_pack_ptr= pack_ptr;
IF_DBUG(uchar const *const old_pack_ptr= pack_ptr;,)
pack_ptr= f->unpack(f->ptr, pack_ptr, row_end, metadata);
DBUG_PRINT("debug", ("field: %s; metadata: 0x%x;"
......@@ -338,10 +338,9 @@ unpack_row(rpl_group_info *rgi,
Galera Node throws "Could not read field" error and drops out of cluster
*/
WSREP_WARN("ROW event unpack field: %s metadata: 0x%x;"
" pack_ptr: %p; conv_table %p conv_field %p table %s"
" conv_table %p conv_field %p table %s"
" row_end: %p",
f->field_name.str, metadata,
old_pack_ptr, conv_table, conv_field,
f->field_name.str, metadata, conv_table, conv_field,
(table_found) ? "found" : "not found", row_end
);
}
......
......@@ -61,7 +61,7 @@ bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd)
referenced from this structure will be modified.
@todo move these into constructor...
*/
HA_CREATE_INFO create_info(lex->create_info);
IF_DBUG(HA_CREATE_INFO create_info(lex->create_info);,)
Alter_info alter_info(lex->alter_info, thd->mem_root);
ulong priv_needed= ALTER_ACL | DROP_ACL | INSERT_ACL | CREATE_ACL;
......
......@@ -4132,16 +4132,16 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
/* Use packed keys for long strings on the first column */
if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
!((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
(key_part_length >= KEY_DEFAULT_PACK_LENGTH &&
(sql_field->real_field_type() == MYSQL_TYPE_STRING ||
sql_field->real_field_type() == MYSQL_TYPE_VARCHAR ||
sql_field->pack_flag & FIELDFLAG_BLOB))&& !is_hash_field_needed)
(key_part_length >= KEY_DEFAULT_PACK_LENGTH &&
(sql_field->real_field_type() == MYSQL_TYPE_STRING ||
sql_field->real_field_type() == MYSQL_TYPE_VARCHAR ||
f_is_blob(sql_field->pack_flag))) && !is_hash_field_needed)
{
if ((column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB)) ||
if ((column_nr == 0 && f_is_blob(sql_field->pack_flag)) ||
sql_field->real_field_type() == MYSQL_TYPE_VARCHAR)
key_info->flags|= HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY;
else
key_info->flags|= HA_PACK_KEY;
key_info->flags|= HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY;
else
key_info->flags|= HA_PACK_KEY;
}
/* Check if the key segment is partial, set the key flag accordingly */
if (key_part_length != sql_field->key_length &&
......@@ -11145,7 +11145,8 @@ bool Sql_cmd_create_table_like::execute(THD *thd)
LEX *lex= thd->lex;
SELECT_LEX *select_lex= lex->first_select_lex();
TABLE_LIST *first_table= select_lex->table_list.first;
DBUG_ASSERT(first_table == lex->query_tables && first_table != 0);
DBUG_ASSERT(first_table == lex->query_tables);
DBUG_ASSERT(first_table != 0);
bool link_to_local;
TABLE_LIST *create_table= first_table;
TABLE_LIST *select_tables= lex->create_last_non_select_table->next_global;
......
/* Copyright (c) 2002, 2011, Oracle and/or its affiliates.
Copyright (c) 2010, 2013, Monty Program Ab.
Copyright (c) 2010, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......
......@@ -87,25 +87,11 @@ static void wsrep_replication_process(THD *thd,
static bool create_wsrep_THD(Wsrep_thd_args* args)
{
ulong old_wsrep_running_threads= wsrep_running_threads;
#ifdef HAVE_PSI_THREAD_INTERFACE
PSI_thread_key key;
switch (args->thread_type())
{
case WSREP_APPLIER_THREAD:
key= key_wsrep_applier;
break;
case WSREP_ROLLBACKER_THREAD:
key= key_wsrep_rollbacker;
break;
default:
assert(0);
key= 0;
break;
}
#endif
bool res= mysql_thread_create(key, args->thread_id(), &connection_attrib,
DBUG_ASSERT(args->thread_type() == WSREP_APPLIER_THREAD ||
args->thread_type() == WSREP_ROLLBACKER_THREAD);
bool res= mysql_thread_create(args->thread_type() == WSREP_APPLIER_THREAD
? key_wsrep_applier : key_wsrep_rollbacker,
args->thread_id(), &connection_attrib,
start_wsrep_THD, (void*)args);
/*
if starting a thread on server startup, wait until this thread's THD
......
......@@ -2517,8 +2517,8 @@ static void fil_crypt_rotation_list_fill()
release fil_system.mutex. */
space->n_pending_ops++;
#ifndef DBUG_OFF
fil_space_t* s= fil_system.read_page0(
space->id);
ut_d(const fil_space_t* s=)
fil_system.read_page0(space->id);
ut_ad(!s || s == space);
#endif
space->n_pending_ops--;
......
sphinx : MDEV-10986, MDEV-10985
union-5539 : MDEV-10986, MDEV-10985
......@@ -23,6 +23,8 @@ return "'indexer' binary not found" unless $exe_sphinx_indexer;
my $exe_sphinx_searchd = &locate_sphinx_binary('searchd');
return "'searchd' binary not found" unless $exe_sphinx_searchd;
my $sphinx_config= "$::opt_vardir/my_sphinx.conf";
# Check for Sphinx engine
return "SphinxSE not found" unless $ENV{HA_SPHINX_SO} or $::mysqld_variables{'sphinx'} eq "ON";
......@@ -95,11 +97,38 @@ sub searchd_start {
&::mtr_verbose("Started $sphinx->{proc}");
}
sub wait_exp_backoff {
my $timeout= shift; # Seconds
my $start_wait= shift; # Seconds
my $scale_factor= shift;
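# Poll "searchd --status" until it succeeds, multiplying the wait by
# $scale_factor after each failed attempt, for at most $timeout seconds.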
$searchd_status= "$exe_sphinx_searchd --status" .
" --config $sphinx_config > /dev/null 2>&1";
my $scale= $start_wait;
my $total_sleep= 0;
while (1) {
my $status = system($searchd_status);
if (not $status) {
return 0;
}
if ($total_sleep >= $timeout) {
last;
}
&::mtr_milli_sleep($scale * 1000);
$total_sleep+= $scale;
$scale*= $scale_factor;
}
&::mtr_warning("Getting a response from searchd timed out");
return 1
}
sub searchd_wait {
my ($sphinx) = @_; # My::Config::Group
return not &::sleep_until_file_created($sphinx->value('pid_file'), 20,
$sphinx->{'proc'})
return wait_exp_backoff(30, 0.1, 2)
}
############# declaration methods ######################
......
......@@ -5,10 +5,12 @@ id w query
2 1 ;mode=extended2;limit=1000000;maxmatches=500
3 1 ;mode=extended2;limit=1000000;maxmatches=500
4 1 ;mode=extended2;limit=1000000;maxmatches=500
5 1 ;mode=extended2;limit=1000000;maxmatches=500
SELECT a.* FROM (SELECT * FROM ts si WHERE si.query='@* 123nothingtofind123;mode=extended2;limit=1000000;maxmatches=500') AS a UNION SELECT b.* FROM (SELECT * FROM ts si WHERE si.query=';mode=extended2;limit=1000000;maxmatches=500') AS b;
id w query
1 1 ;mode=extended2;limit=1000000;maxmatches=500
2 1 ;mode=extended2;limit=1000000;maxmatches=500
3 1 ;mode=extended2;limit=1000000;maxmatches=500
4 1 ;mode=extended2;limit=1000000;maxmatches=500
5 1 ;mode=extended2;limit=1000000;maxmatches=500
drop table ts;
......@@ -825,6 +825,11 @@ static int skip_colon(json_engine_t *j)
static int skip_key(json_engine_t *j)
{
int t_next, c_len;
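/* The key name may start with an escaped character (e.g. "\"");
   handle the escape before scanning the rest of the key name. */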
if (json_instr_chr_map[j->s.c_next] == S_BKSL &&
json_handle_esc(&j->s))
return 1;
while (json_read_keyname_chr(j) == 0) {}
if (j->s.error)
......
......@@ -8,7 +8,7 @@ if [ $1 = 0 ] ; then
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql stop > /dev/null
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --del mysql > /dev/null 2>&1
/sbin/chkconfig --del mysql > /dev/null 2>&1 || :
fi
fi
fi
......