Commit d81f6d61 authored by Chad MILLER

Merge from team tree.

parents 9b1b1d50 ecfdc356
#!/bin/sh -
# $Id: s_all,v 1.10 2001/08/04 14:01:44 bostic Exp $
# Search an AWK program, use GNU awk if available
for x in gawk awk ; do
if type $x; then
AWK=$x
break
fi
done
if test -z "$AWK"; then
echo 'No AWK program found'
exit 1
fi
export AWK
# end of AWK search
sh s_dir
#sh s_perm # permissions.
......
@@ -79,7 +79,7 @@ for i in db btree clib common dbreg env fileops hash hmac \
[ $i = os ] && f="$f ../os_win32/*.c"
[ $i = rpc_server ] && f="../$i/c/*.c"
[ $i = crypto ] && f="../$i/*.c ../$i/*/*.c"
-awk -f gen_inc.awk \
+$AWK -f gen_inc.awk \
-v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
-v e_dfile=$e_dfile \
-v e_pfile=$e_pfile \
@@ -97,7 +97,7 @@ done
# files.
for i in dbm hsearch; do
f="../$i/*.c"
-awk -f gen_inc.awk \
+$AWK -f gen_inc.awk \
-v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
-v e_dfile=$e_dfile \
-v e_pfile=$e_pfile \
@@ -142,7 +142,7 @@ head space defonly _DB_EXT_185_DEF_IN_ > $e_dfile
head space _DB_EXT_185_PROT_IN_ > $e_pfile
f="../db185/*.c"
-awk -f gen_inc.awk \
+$AWK -f gen_inc.awk \
-v db_version_unique_name=$DB_VERSION_UNIQUE_NAME \
-v e_dfile=$e_dfile \
-v e_pfile=$e_pfile \
......
@@ -45,7 +45,7 @@ cmp $loglist $f > /dev/null 2>&1 ||
for i in db dbreg btree fileops hash qam txn; do
for f in ../$i/*.src; do
subsystem=`basename $f .src`
-awk -f gen_rec.awk \
+$AWK -f gen_rec.awk \
-v source_file=$source \
-v header_file=$header \
-v template_file=$template < $f
......
@@ -43,7 +43,7 @@ rm -f $client_file \
xidsize=\
`awk '/^#define/ { if ($2 == "DB_XIDDATASIZE") { print $3 }}' $dbinc_file`
-awk -f gen_rpc.awk \
+$AWK -f gen_rpc.awk \
-v major=$DB_VERSION_MAJOR \
-v minor=$DB_VERSION_MINOR \
-v xidsize=$xidsize \
......
@@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc)
AC_CANONICAL_SYSTEM
# The Docs Makefile.am parses this line!
# remember to also change ndb version below and update version.c in ndb
-AM_INIT_AUTOMAKE(mysql, 5.0.76)
+AM_INIT_AUTOMAKE(mysql, 5.0.77)
AM_CONFIG_HEADER([include/config.h:config.h.in])
PROTOCOL_VERSION=10
@@ -23,7 +23,7 @@ NDB_SHARED_LIB_VERSION=$NDB_SHARED_LIB_MAJOR_VERSION:0:0
# ndb version
NDB_VERSION_MAJOR=5
NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=76
+NDB_VERSION_BUILD=77
NDB_VERSION_STATUS=""
# Set all version vars based on $VERSION. How do we do this more elegant ?
......
@@ -655,3 +655,80 @@ show status like 'Last_query_cost';
Variable_name Value
Last_query_cost 794.837037
drop table t1,t2,t3,t4,t5,t6,t7;
CREATE TABLE t1 (a int, b int, d int, i int);
INSERT INTO t1 VALUES (1,1,1,1);
CREATE TABLE t2 (b int, c int, j int);
INSERT INTO t2 VALUES (1,1,1);
CREATE TABLE t2_1 (j int);
INSERT INTO t2_1 VALUES (1);
CREATE TABLE t3 (c int, f int);
INSERT INTO t3 VALUES (1,1);
CREATE TABLE t3_1 (f int);
INSERT INTO t3_1 VALUES (1);
CREATE TABLE t4 (d int, e int, k int);
INSERT INTO t4 VALUES (1,1,1);
CREATE TABLE t4_1 (k int);
INSERT INTO t4_1 VALUES (1);
CREATE TABLE t5 (g int, d int, h int, l int);
INSERT INTO t5 VALUES (1,1,1,1);
CREATE TABLE t5_1 (l int);
INSERT INTO t5_1 VALUES (1);
SET optimizer_search_depth = 3;
SELECT 1
FROM t1
LEFT JOIN (
t2 JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
1
1
SELECT 1
FROM t1
LEFT JOIN (
t2 LEFT JOIN (t3 JOIN t3_1 ON t3.f = t3_1.f) ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
1
1
SELECT 1
FROM t1
LEFT JOIN (
(t2 JOIN t2_1 ON t2.j = t2_1.j) JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
1
1
SELECT 1
FROM t1
LEFT JOIN (
t2 JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
(t4 JOIN t4_1 ON t4.k = t4_1.k) LEFT JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
1
1
SELECT 1
FROM t1
LEFT JOIN (
t2 JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 LEFT JOIN (t5 JOIN t5_1 ON t5.l = t5_1.l) ON t5.d = t4.d
) ON t4.d = t1.d
;
1
1
SET optimizer_search_depth = DEFAULT;
DROP TABLE t1,t2,t2_1,t3,t3_1,t4,t4_1,t5,t5_1;
End of 5.0 tests
@@ -424,3 +424,10 @@ select f1 from t1 group by f1 having max(f1)=f1;
f1
set session sql_mode='';
drop table t1;
CREATE TABLE t1 ( a INT, b INT);
INSERT INTO t1 VALUES (1, 1), (2,2), (3, NULL);
SELECT b, COUNT(DISTINCT a) FROM t1 GROUP BY b HAVING b is NULL;
b COUNT(DISTINCT a)
NULL 1
DROP TABLE t1;
End of 5.0 tests
@@ -66,10 +66,10 @@ a
2
show status like "Qcache_queries_in_cache";
Variable_name Value
-Qcache_queries_in_cache 3
+Qcache_queries_in_cache 6
show status like "Qcache_hits";
Variable_name Value
-Qcache_hits 3
+Qcache_hits 0
insert into t1 values (3);
insert into t2 values (3);
insert into t1 values (4);
@@ -90,14 +90,14 @@ a
2
show status like "Qcache_queries_in_cache";
Variable_name Value
-Qcache_queries_in_cache 1
+Qcache_queries_in_cache 2
show status like "Qcache_hits";
Variable_name Value
-Qcache_hits 4
+Qcache_hits 1
commit;
show status like "Qcache_queries_in_cache";
Variable_name Value
-Qcache_queries_in_cache 1
+Qcache_queries_in_cache 2
drop table t3,t2,t1;
CREATE TABLE t1 (id int(11) NOT NULL auto_increment, PRIMARY KEY (id)) ENGINE=InnoDB;
select count(*) from t1;
......
@@ -3,3 +3,10 @@ execute stmt1;
Id User Host db Command Time State Info
number root localhost test Query time NULL show full processlist
deallocate prepare stmt1;
FLUSH STATUS;
SHOW GLOBAL STATUS LIKE 'com_select';
Variable_name Value
Com_select 101
SHOW GLOBAL STATUS LIKE 'com_select';
Variable_name Value
Com_select 101
@@ -1681,3 +1681,54 @@ Qcache_hits 1
DROP TABLE t1;
SET GLOBAL concurrent_insert= @save_concurrent_insert;
SET GLOBAL query_cache_size= default;
DROP TABLE IF EXISTS t1;
FLUSH STATUS;
SET GLOBAL query_cache_size=1048576;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
SHOW STATUS LIKE 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 0
SELECT * FROM t1;
a
1
2
3
4
5
BEGIN;
SELECT * FROM t1;
a
1
2
3
4
5
COMMIT;
SHOW STATUS LIKE 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 2
SHOW STATUS LIKE "Qcache_hits";
Variable_name Value
Qcache_hits 0
SELECT * FROM t1;
a
1
2
3
4
5
BEGIN;
SELECT * FROM t1;
a
1
2
3
4
5
COMMIT;
SHOW STATUS LIKE "Qcache_hits";
Variable_name Value
Qcache_hits 2
DROP TABLE t1;
SET GLOBAL query_cache_size= default;
@@ -345,3 +345,19 @@ id
drop table t1;
drop function f1;
set GLOBAL query_cache_size=0;
DROP TABLE IF EXISTS t1;
FLUSH STATUS;
SET GLOBAL query_cache_size=1048576;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
SHOW STATUS LIKE 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 0
LOCK TABLES t1 WRITE;
SELECT * FROM t1;
UNLOCK TABLES;
SHOW STATUS LIKE 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 0
DROP TABLE t1;
SET GLOBAL query_cache_size= default;
@@ -311,3 +311,76 @@ explain select t1.c11 from t7, t6, t5, t4, t3, t2, t1 where t1.c11 = t2.c21 and
show status like 'Last_query_cost';
drop table t1,t2,t3,t4,t5,t6,t7;
#
# Bug # 38795: Automatic search depth and nested join's results in server
# crash
#
CREATE TABLE t1 (a int, b int, d int, i int); INSERT INTO t1 VALUES (1,1,1,1);
CREATE TABLE t2 (b int, c int, j int); INSERT INTO t2 VALUES (1,1,1);
CREATE TABLE t2_1 (j int); INSERT INTO t2_1 VALUES (1);
CREATE TABLE t3 (c int, f int); INSERT INTO t3 VALUES (1,1);
CREATE TABLE t3_1 (f int); INSERT INTO t3_1 VALUES (1);
CREATE TABLE t4 (d int, e int, k int); INSERT INTO t4 VALUES (1,1,1);
CREATE TABLE t4_1 (k int); INSERT INTO t4_1 VALUES (1);
CREATE TABLE t5 (g int, d int, h int, l int); INSERT INTO t5 VALUES (1,1,1,1);
CREATE TABLE t5_1 (l int); INSERT INTO t5_1 VALUES (1);
SET optimizer_search_depth = 3;
SELECT 1
FROM t1
LEFT JOIN (
t2 JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
SELECT 1
FROM t1
LEFT JOIN (
t2 LEFT JOIN (t3 JOIN t3_1 ON t3.f = t3_1.f) ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
SELECT 1
FROM t1
LEFT JOIN (
(t2 JOIN t2_1 ON t2.j = t2_1.j) JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
SELECT 1
FROM t1
LEFT JOIN (
t2 JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
(t4 JOIN t4_1 ON t4.k = t4_1.k) LEFT JOIN t5 ON t5.d = t4.d
) ON t4.d = t1.d
;
SELECT 1
FROM t1
LEFT JOIN (
t2 JOIN t3 ON t3.c = t2.c
) ON t2.b = t1.b
LEFT JOIN (
t4 LEFT JOIN (t5 JOIN t5_1 ON t5.l = t5_1.l) ON t5.d = t4.d
) ON t4.d = t1.d
;
SET optimizer_search_depth = DEFAULT;
DROP TABLE t1,t2,t2_1,t3,t3_1,t4,t4_1,t5,t5_1;
--echo End of 5.0 tests
@@ -432,3 +432,14 @@ select f1 from t1 having max(f1)=f1;
select f1 from t1 group by f1 having max(f1)=f1;
set session sql_mode='';
drop table t1;
#
# Bug #38637: COUNT DISTINCT prevents NULL testing in HAVING clause
#
CREATE TABLE t1 ( a INT, b INT);
INSERT INTO t1 VALUES (1, 1), (2,2), (3, NULL);
SELECT b, COUNT(DISTINCT a) FROM t1 GROUP BY b HAVING b is NULL;
DROP TABLE t1;
--echo End of 5.0 tests
@@ -16,3 +16,31 @@ execute stmt1;
deallocate prepare stmt1;
# End of 4.1 tests
#
# Bug#31222: com_% global status counters behave randomly with
# mysql_change_user.
#
FLUSH STATUS;
--disable_result_log
--disable_query_log
let $i = 100;
while ($i)
{
dec $i;
SELECT 1;
}
--enable_query_log
--enable_result_log
SHOW GLOBAL STATUS LIKE 'com_select';
--change_user
SHOW GLOBAL STATUS LIKE 'com_select';
@@ -1276,4 +1276,31 @@ DROP TABLE t1;
SET GLOBAL concurrent_insert= @save_concurrent_insert;
SET GLOBAL query_cache_size= default;
#
# Bug#36326: nested transaction and select
#
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
FLUSH STATUS;
SET GLOBAL query_cache_size=1048576;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
SHOW STATUS LIKE 'Qcache_queries_in_cache';
SELECT * FROM t1;
BEGIN;
SELECT * FROM t1;
COMMIT;
SHOW STATUS LIKE 'Qcache_queries_in_cache';
SHOW STATUS LIKE "Qcache_hits";
SELECT * FROM t1;
BEGIN;
SELECT * FROM t1;
COMMIT;
SHOW STATUS LIKE "Qcache_hits";
DROP TABLE t1;
SET GLOBAL query_cache_size= default;
# End of 5.0 tests
@@ -222,3 +222,34 @@ disconnect con2;
connection default;
set GLOBAL query_cache_size=0;
#
# Bug#40264: Aborted cached query causes query to hang indefinitely on next cache hit
#
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
FLUSH STATUS;
SET GLOBAL query_cache_size=1048576;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
SHOW STATUS LIKE 'Qcache_queries_in_cache';
LOCK TABLES t1 WRITE;
connect(con1,localhost,root,,);
--send SELECT * FROM t1
connection default;
let $show_type= open tables where `table`='t1' and in_use=2;
let $show_pattern= '%t1%2%';
--source include/wait_show_pattern.inc
dirty_close con1;
UNLOCK TABLES;
let $show_type= open tables where `table`='t1' and in_use=0;
let $show_pattern= '%t1%0%';
--source include/wait_show_pattern.inc
SHOW STATUS LIKE 'Qcache_queries_in_cache';
DROP TABLE t1;
SET GLOBAL query_cache_size= default;
# End of 5.0 tests
@@ -2041,6 +2041,12 @@ bool Item_field::val_bool_result()
}
bool Item_field::is_null_result()
{
return (null_value=result_field->is_null());
}
bool Item_field::eq(const Item *item, bool binary_cmp) const
{
Item *real_item= ((Item *) item)->real_item();
@@ -5629,6 +5635,15 @@ double Item_ref::val_result()
}
bool Item_ref::is_null_result()
{
if (result_field)
return (null_value=result_field->is_null());
return is_null();
}
longlong Item_ref::val_int_result()
{
if (result_field)
@@ -5734,7 +5749,9 @@ String *Item_ref::val_str(String* tmp)
bool Item_ref::is_null()
{
DBUG_ASSERT(fixed);
-return (*ref)->is_null();
+bool tmp=(*ref)->is_null_result();
null_value=(*ref)->null_value;
return tmp;
}
......
@@ -652,6 +652,7 @@ public:
virtual my_decimal *val_decimal_result(my_decimal *val)
{ return val_decimal(val); }
virtual bool val_bool_result() { return val_bool(); }
virtual bool is_null_result() { return is_null(); }
/* bit map of tables used by item */
virtual table_map used_tables() const { return (table_map) 0L; }
@@ -1301,6 +1302,7 @@ public:
String *str_result(String* tmp);
my_decimal *val_decimal_result(my_decimal *);
bool val_bool_result();
bool is_null_result();
bool send(Protocol *protocol, String *str_arg);
void reset_field(Field *f);
bool fix_fields(THD *, Item **);
@@ -1942,6 +1944,7 @@ public:
String *str_result(String* tmp);
my_decimal *val_decimal_result(my_decimal *);
bool val_bool_result();
bool is_null_result();
bool send(Protocol *prot, String *tmp);
void make_field(Send_field *field);
bool fix_fields(THD *, Item **);
......
@@ -4285,6 +4285,15 @@ my_decimal *Item_func_set_user_var::val_decimal_result(my_decimal *val)
}
bool Item_func_set_user_var::is_null_result()
{
DBUG_ASSERT(fixed == 1);
check(TRUE);
update(); // Store expression
return is_null();
}
void Item_func_set_user_var::print(String *str)
{
str->append(STRING_WITH_LEN("(@"));
......
@@ -1302,6 +1302,7 @@ public:
longlong val_int_result();
String *str_result(String *str);
my_decimal *val_decimal_result(my_decimal *);
bool is_null_result();
bool update_hash(void *ptr, uint length, enum Item_result type,
CHARSET_INFO *cs, Derivation dv, bool unsigned_arg);
bool send(Protocol *protocol, String *str_arg);
......
@@ -651,6 +651,8 @@ struct Query_cache_query_flags
unsigned int client_long_flag:1;
unsigned int client_protocol_41:1;
unsigned int more_results_exists:1;
unsigned int in_trans:1;
unsigned int autocommit:1;
unsigned int pkt_nr;
uint character_set_client_num;
uint character_set_results_num;
......
@@ -710,7 +710,12 @@ void query_cache_end_of_result(THD *thd)
if (thd->net.query_cache_query == 0)
DBUG_VOID_RETURN;
-if (thd->killed)
+/*
+Check if the NET layer raised an unreported error -- my_error() and,
+as a consequence, query_cache_abort() haven't been called. Abort the
+cached result as it might be only partially complete.
+*/
+if (thd->killed || thd->net.report_error)
{
query_cache_abort(&thd->net);
DBUG_VOID_RETURN;
@@ -859,6 +864,8 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
CLIENT_PROTOCOL_41);
flags.more_results_exists= test(thd->server_status &
SERVER_MORE_RESULTS_EXISTS);
flags.in_trans= test(thd->server_status & SERVER_STATUS_IN_TRANS);
flags.autocommit= test(thd->server_status & SERVER_STATUS_AUTOCOMMIT);
flags.pkt_nr= net->pkt_nr;
flags.character_set_client_num=
thd->variables.character_set_client->number;
@@ -879,7 +886,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
sql mode: 0x%lx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
-def_week_frmt: %lu",
+def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
(int)flags.more_results_exists,
@@ -893,7 +900,10 @@ def_week_frmt: %lu",
flags.max_sort_length,
flags.group_concat_max_len,
flags.div_precision_increment,
-flags.default_week_format));
+flags.default_week_format,
(int)flags.in_trans,
(int)flags.autocommit));
/*
Make InnoDB to release the adaptive hash index latch before
acquiring the query cache mutex.
@@ -1144,6 +1154,8 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
CLIENT_PROTOCOL_41);
flags.more_results_exists= test(thd->server_status &
SERVER_MORE_RESULTS_EXISTS);
flags.in_trans= test(thd->server_status & SERVER_STATUS_IN_TRANS);
flags.autocommit= test(thd->server_status & SERVER_STATUS_AUTOCOMMIT);
flags.pkt_nr= thd->net.pkt_nr;
flags.character_set_client_num= thd->variables.character_set_client->number;
flags.character_set_results_num=
@@ -1162,7 +1174,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \
sql mode: 0x%lx, sort len: %lu, conncat len: %lu, div_precision: %lu, \
-def_week_frmt: %lu",
+def_week_frmt: %lu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
(int)flags.more_results_exists,
@@ -1176,7 +1188,9 @@ def_week_frmt: %lu",
flags.max_sort_length,
flags.group_concat_max_len,
flags.div_precision_increment,
-flags.default_week_format));
+flags.default_week_format,
(int)flags.in_trans,
(int)flags.autocommit));
memcpy((void *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)),
&flags, QUERY_CACHE_FLAGS_SIZE);
query_block = (Query_cache_block *) hash_search(&queries, (byte*) sql,
......
@@ -391,6 +391,10 @@ void THD::init_for_queries()
void THD::change_user(void)
{
pthread_mutex_lock(&LOCK_status);
add_to_status(&global_status_var, &status_var);
pthread_mutex_unlock(&LOCK_status);
cleanup();
cleanup_done= 0;
init();
......
@@ -100,7 +100,7 @@ static COND* substitute_for_best_equal_field(COND *cond,
void *table_join_idx);
static COND *simplify_joins(JOIN *join, List<TABLE_LIST> *join_list,
COND *conds, bool top);
-static bool check_interleaving_with_nj(JOIN_TAB *last, JOIN_TAB *next);
+static bool check_interleaving_with_nj(JOIN_TAB *next);
static void restore_prev_nj_state(JOIN_TAB *last);
static void reset_nj_counters(List<TABLE_LIST> *join_list);
static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
@@ -4718,6 +4718,18 @@ greedy_search(JOIN *join,
*/
join->positions[idx]= best_pos;
/*
Update the interleaving state after extending the current partial plan
with a new table.
We are doing this here because best_extension_by_limited_search reverts
the interleaving state to the one of the non-extended partial plan
on exit.
*/
IF_DBUG(bool is_interleave_error= )
check_interleaving_with_nj (best_table);
/* This has been already checked by best_extension_by_limited_search */
DBUG_ASSERT(!is_interleave_error);
/* find the position of 'best_table' in 'join->best_ref' */
best_idx= idx;
JOIN_TAB *pos= join->best_ref[best_idx];
@@ -4735,7 +4747,7 @@ greedy_search(JOIN *join,
--size_remain;
++idx;
-DBUG_EXECUTE("opt", print_plan(join, join->tables,
+DBUG_EXECUTE("opt", print_plan(join, idx,
record_count, read_time, read_time,
"extended"););
} while (TRUE);
@@ -4886,7 +4898,7 @@ best_extension_by_limited_search(JOIN *join,
table_map real_table_bit= s->table->map;
if ((remaining_tables & real_table_bit) &&
!(remaining_tables & s->dependent) &&
-(!idx || !check_interleaving_with_nj(join->positions[idx-1].table, s)))
+(!idx || !check_interleaving_with_nj(s)))
{
double current_record_count, current_read_time;
@@ -5031,7 +5043,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
{
table_map real_table_bit=s->table->map;
if ((rest_tables & real_table_bit) && !(rest_tables & s->dependent) &&
-(!idx|| !check_interleaving_with_nj(join->positions[idx-1].table, s)))
+(!idx|| !check_interleaving_with_nj(s)))
{
double records, best;
best_access_path(join, s, thd, rest_tables, idx, record_count,
@@ -8403,9 +8415,6 @@ static void reset_nj_counters(List<TABLE_LIST> *join_list)
SYNOPSIS
check_interleaving_with_nj()
-join      Join being processed
-last_tab  Last table in current partial join order (this function is
-          not called for empty partial join orders)
next_tab  Table we're going to extend the current partial join with
DESCRIPTION
@@ -8490,10 +8499,10 @@ static void reset_nj_counters(List<TABLE_LIST> *join_list)
TRUE Requested join order extension not allowed.
*/
-static bool check_interleaving_with_nj(JOIN_TAB *last_tab, JOIN_TAB *next_tab)
+static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
{
TABLE_LIST *next_emb= next_tab->table->pos_in_table_list->embedding;
-JOIN *join= last_tab->join;
+JOIN *join= next_tab->join;
if (join->cur_embedding_map & ~next_tab->embedding_map)
{
......
@@ -16354,6 +16354,63 @@ static void test_bug40365(void)
DBUG_VOID_RETURN;
}
/**
Bug#36326: nested transaction and select
*/
#ifdef HAVE_QUERY_CACHE
static void test_bug36326()
{
int rc;
DBUG_ENTER("test_bug36326");
myheader("test_bug36326");
rc= mysql_autocommit(mysql, TRUE);
myquery(rc);
rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE t1 (a INTEGER)");
myquery(rc);
rc= mysql_query(mysql, "INSERT INTO t1 VALUES (1)");
myquery(rc);
rc= mysql_query(mysql, "SET GLOBAL query_cache_type = 1");
myquery(rc);
rc= mysql_query(mysql, "SET GLOBAL query_cache_size = 1048576");
myquery(rc);
DIE_UNLESS(!(mysql->server_status & SERVER_STATUS_IN_TRANS));
DIE_UNLESS(mysql->server_status & SERVER_STATUS_AUTOCOMMIT);
rc= mysql_query(mysql, "BEGIN");
myquery(rc);
DIE_UNLESS(mysql->server_status & SERVER_STATUS_IN_TRANS);
rc= mysql_query(mysql, "SELECT * FROM t1");
myquery(rc);
rc= my_process_result(mysql);
DIE_UNLESS(rc == 1);
rc= mysql_rollback(mysql);
myquery(rc);
rc= mysql_query(mysql, "ROLLBACK");
myquery(rc);
DIE_UNLESS(!(mysql->server_status & SERVER_STATUS_IN_TRANS));
rc= mysql_query(mysql, "SELECT * FROM t1");
myquery(rc);
DIE_UNLESS(!(mysql->server_status & SERVER_STATUS_IN_TRANS));
rc= my_process_result(mysql);
DIE_UNLESS(rc == 1);
rc= mysql_query(mysql, "DROP TABLE t1");
myquery(rc);
rc= mysql_query(mysql, "SET GLOBAL query_cache_size = 0");
myquery(rc);
DBUG_VOID_RETURN;
}
#endif
/*
Read and parse arguments and MySQL options from my.cnf
*/
@@ -16652,6 +16709,9 @@ static struct my_tests_st my_tests[]= {
{ "test_bug40365", test_bug40365 },
#ifdef HAVE_SPATIAL
{ "test_bug37956", test_bug37956 },
#endif
#ifdef HAVE_QUERY_CACHE
{ "test_bug36326", test_bug36326 },
#endif
{ 0, 0 }
};
......