Commit 593c6db2
Authored Aug 25, 2010 by Alexander Nozdrin

Auto-merge from mysql-5.5-merge.

Parents: 7c7be629, 0827d824

Showing 21 changed files with 209 additions and 15 deletions (+209, -15)

mysql-test/include/not_blackhole.inc              +5   -0
mysql-test/r/func_gconcat.result                  +20  -0
mysql-test/r/func_misc.result                     +15  -0
mysql-test/r/func_time.result                     +8   -0
mysql-test/r/partition_not_blackhole.result       +16  -0
mysql-test/std_data/parts/t1_blackhole.frm        +0   -0
mysql-test/std_data/parts/t1_blackhole.par        +0   -0
mysql-test/suite/innodb/t/innodb_mysql.test       +1   -0
mysql-test/suite/rpl/t/rpl_drop.test              +1   -0
mysql-test/t/func_gconcat.test                    +21  -0
mysql-test/t/func_misc.test                       +14  -0
mysql-test/t/func_time.test                       +11  -0
mysql-test/t/partition_not_blackhole-master.opt   +1   -0
mysql-test/t/partition_not_blackhole.test         +26  -0
sql/ha_partition.cc                               +6   -1
sql/item_func.cc                                  +2   -0
sql/item_sum.cc                                   +22  -3
sql/log.h                                         +2   -2
sql/sql_select.cc                                 +37  -7
sql/sql_select.h                                  +1   -1
sql/table.h                                       +0   -1

mysql-test/include/not_blackhole.inc  (new file, mode 100644)

if (`SELECT count(*) FROM information_schema.engines WHERE
     (support = 'YES' OR support = 'DEFAULT') AND
     engine = 'blackhole'`){
  skip Blackhole engine enabled;
}
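
The include follows the usual mysqltest skip pattern: a test that must run
without the Blackhole engine sources it near the top, and mysqltest skips the
test whenever the engine is available. A minimal usage sketch (the test name
below is hypothetical; the concrete consumer added by this commit is
mysql-test/t/partition_not_blackhole.test together with a -master.opt file
passing --loose-skip-blackhole):

# t/my_test_without_blackhole.test -- hypothetical example
--source include/not_blackhole.inc

# Reached only when the Blackhole engine is unavailable; otherwise the test
# is reported as skipped with the reason "Blackhole engine enabled".
SHOW ENGINES;
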
mysql-test/r/func_gconcat.result
@@ -1003,6 +1003,7 @@ SELECT 1 FROM
1
1
DROP TABLE t1;
End of 5.0 tests
#
# Bug #52397: another crash with explain extended and group_concat
#
@@ -1019,6 +1020,25 @@ Warnings:
Note 1003 select 1 AS `1` from dual
DROP TABLE t1;
End of 5.0 tests
#
# Bug #54476: crash when group_concat and 'with rollup' in prepared statements
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2);
PREPARE stmt FROM "SELECT GROUP_CONCAT(t1.a ORDER BY t1.a) FROM t1 JOIN t1 t2 GROUP BY t1.a WITH ROLLUP";
EXECUTE stmt;
GROUP_CONCAT(t1.a ORDER BY t1.a)
1,1
2,2
1,1,2,2
EXECUTE stmt;
GROUP_CONCAT(t1.a ORDER BY t1.a)
1,1
2,2
1,1,2,2
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
End of 5.1 tests
DROP TABLE IF EXISTS t1, t2;
CREATE TABLE t1 (a VARCHAR(6), b INT);
CREATE TABLE t2 (a VARCHAR(6), b INT);
mysql-test/r/func_misc.result
@@ -337,6 +337,21 @@ select connection_id() > 0;
connection_id() > 0
1
#
# Bug #54461: crash with longblob and union or update with subquery
#
CREATE TABLE t1 (a INT, b LONGBLOB);
INSERT INTO t1 VALUES (1, '2'), (2, '3'), (3, '2');
SELECT DISTINCT LEAST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;
LEAST(a, (SELECT b FROM t1 LIMIT 1))
1
2
SELECT DISTINCT GREATEST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;
GREATEST(a, (SELECT b FROM t1 LIMIT 1))
2
3
1
DROP TABLE t1;
#
# Bug #52165: Assertion failed: file .\dtoa.c, line 465
#
CREATE TABLE t1 (a SET('a'), b INT);
mysql-test/r/func_time.result
@@ -1305,4 +1305,12 @@ date_sub("0069-01-01 00:00:01",INTERVAL 2 SECOND)
select date_sub("0169-01-01 00:00:01",INTERVAL 2 SECOND);
date_sub("0169-01-01 00:00:01",INTERVAL 2 SECOND)
0168-12-31 23:59:59
CREATE TABLE t1(a DOUBLE NOT NULL);
INSERT INTO t1 VALUES (0),(9.216e-096);
# should not crash
SELECT 1 FROM t1 ORDER BY @x:=makedate(a,a);
1
1
1
DROP TABLE t1;
End of 5.1 tests
mysql-test/r/partition_not_blackhole.result  (new file, mode 100644)
DROP TABLE IF EXISTS t1;
#
# Bug#46086: crash when dropping a partitioned table and
# the original engine is disabled
# Copy a .frm and .par file which was created with:
# create table `t1` (`id` int primary key) engine=blackhole
# partition by key () partitions 1;
SHOW TABLES;
Tables_in_test
t1
SHOW CREATE TABLE t1;
ERROR HY000: Incorrect information in file: './test/t1.frm'
DROP TABLE t1;
ERROR 42S02: Unknown table 't1'
t1.frm
t1.par
mysql-test/std_data/parts/t1_blackhole.frm  (new file, mode 100644)
This diff was suppressed by a .gitattributes entry.
mysql-test/std_data/parts/t1_blackhole.par  (new file, mode 100644)
File added.
mysql-test/suite/innodb/t/innodb_mysql.test
@@ -747,6 +747,7 @@ UNLOCK TABLES;
DROP TABLE t1;
--echo End of 5.1 tests
mysql-test/suite/rpl/t/rpl_drop.test
@@ -10,3 +10,4 @@ drop table t1, t2;
sync_slave_with_master;
# End of 4.1 tests
mysql-test/t/func_gconcat.test
@@ -708,6 +708,7 @@ SELECT 1 FROM
DROP TABLE t1;
--echo End of 5.0 tests
--echo #
--echo # Bug #52397: another crash with explain extended and group_concat

@@ -722,6 +723,26 @@ DROP TABLE t1;
--echo End of 5.0 tests
--echo #
--echo # Bug #54476: crash when group_concat and 'with rollup' in prepared statements
--echo #
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1), (2);
PREPARE stmt FROM "SELECT GROUP_CONCAT(t1.a ORDER BY t1.a) FROM t1 JOIN t1 t2 GROUP BY t1.a WITH ROLLUP";
EXECUTE stmt;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
DROP TABLE t1;
--echo End of 5.1 tests
#
# Bug#36785: Wrong error message when group_concat() exceeds max length
#
mysql-test/t/func_misc.test
@@ -467,6 +467,19 @@ select NAME_CONST('_id',1234) as id;
select connection_id() > 0;
--echo #
--echo # Bug #54461: crash with longblob and union or update with subquery
--echo #
CREATE TABLE t1 (a INT, b LONGBLOB);
INSERT INTO t1 VALUES (1, '2'), (2, '3'), (3, '2');
SELECT DISTINCT LEAST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;
SELECT DISTINCT GREATEST(a, (SELECT b FROM t1 LIMIT 1)) FROM t1 UNION SELECT 1;
DROP TABLE t1;
--echo #
--echo # Bug #52165: Assertion failed: file .\dtoa.c, line 465
--echo #

@@ -478,4 +491,5 @@ SELECT COALESCE(a) = COALESCE(b) FROM t1;
DROP TABLE t1;
--echo End of tests
mysql-test/t/func_time.test
@@ -821,4 +821,15 @@ select date_sub("0069-01-01 00:00:01",INTERVAL 2 SECOND);
select date_sub("0169-01-01 00:00:01",INTERVAL 2 SECOND);
#
# Bug #55565: debug assertion when ordering by expressions with user
# variable assignments
#
CREATE TABLE t1(a DOUBLE NOT NULL);
INSERT INTO t1 VALUES (0),(9.216e-096);
--echo # should not crash
SELECT 1 FROM t1 ORDER BY @x:=makedate(a,a);
DROP TABLE t1;
--echo End of 5.1 tests
mysql-test/t/partition_not_blackhole-master.opt  (new file, mode 100644)

--loose-skip-blackhole
mysql-test/t/partition_not_blackhole.test  (new file, mode 100644)

--source include/have_partition.inc
--source include/not_blackhole.inc

--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings

let $MYSQLD_DATADIR= `SELECT @@datadir`;

--echo #
--echo # Bug#46086: crash when dropping a partitioned table and
--echo # the original engine is disabled
--echo # Copy a .frm and .par file which was created with:
--echo # create table `t1` (`id` int primary key) engine=blackhole
--echo # partition by key () partitions 1;
--copy_file std_data/parts/t1_blackhole.frm $MYSQLD_DATADIR/test/t1.frm
--copy_file std_data/parts/t1_blackhole.par $MYSQLD_DATADIR/test/t1.par
SHOW TABLES;
--replace_result $MYSQLD_DATADIR ./
--error ER_NOT_FORM_FILE
SHOW CREATE TABLE t1;
--error ER_BAD_TABLE_ERROR
DROP TABLE t1;
--list_files $MYSQLD_DATADIR/test t1*
--remove_file $MYSQLD_DATADIR/test/t1.frm
--remove_file $MYSQLD_DATADIR/test/t1.par
sql/ha_partition.cc

@@ -2446,9 +2446,14 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
   tot_partition_words= (m_tot_parts + 3) / 4;
   engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*));
   for (i= 0; i < m_tot_parts; i++)
+  {
     engine_array[i]= ha_resolve_by_legacy_type(ha_thd(),
                                                (enum legacy_db_type)
                                                  *(uchar *) ((file_buffer) + 12 + i));
+    if (!engine_array[i])
+      goto err3;
+  }
   address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
   tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4;
   if (len_words != (tot_partition_words + tot_name_words + 4))
sql/item_func.cc

@@ -2533,6 +2533,8 @@ void Item_func_min_max::fix_length_and_dec()
                                                                   decimals,
                                                                   unsigned_flag));
   }
+  else if (cmp_type == REAL_RESULT)
+    fix_char_length(float_length(decimals));
   cached_field_type= agg_field_type(args, arg_count);
 }
sql/item_sum.cc

@@ -984,7 +984,8 @@ bool Aggregator_distinct::add()
   {
     int error;
     copy_fields(tmp_table_param);
-    copy_funcs(tmp_table_param->items_to_copy);
+    if (copy_funcs(tmp_table_param->items_to_copy, table->in_use))
+      return TRUE;

     for (Field **field=table->field ; *field ; field++)
       if ((*field)->is_real_null(0))

@@ -3058,7 +3059,6 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
   tree(item->tree),
   unique_filter(item->unique_filter),
   table(item->table),
-  order(item->order),
   context(item->context),
   arg_count_order(item->arg_count_order),
   arg_count_field(item->arg_count_field),

@@ -3071,6 +3071,24 @@ Item_func_group_concat::Item_func_group_concat(THD *thd,
 {
   quick_group= item->quick_group;
   result.set_charset(collation.collation);
+
+  /*
+    Since the ORDER structures pointed to by the elements of the 'order' array
+    may be modified in find_order_in_list() called from
+    Item_func_group_concat::setup(), create a copy of those structures so that
+    such modifications done in this object would not have any effect on the
+    object being copied.
+  */
+  ORDER *tmp;
+  if (!(order= (ORDER **) thd->alloc(sizeof(ORDER *) * arg_count_order +
+                                     sizeof(ORDER) * arg_count_order)))
+    return;
+  tmp= (ORDER *)(order + arg_count_order);
+  for (uint i= 0; i < arg_count_order; i++, tmp++)
+  {
+    memcpy(tmp, item->order[i], sizeof(ORDER));
+    order[i]= tmp;
+  }
 }

@@ -3136,7 +3154,8 @@ bool Item_func_group_concat::add()
   if (always_null)
     return 0;
   copy_fields(tmp_table_param);
-  copy_funcs(tmp_table_param->items_to_copy);
+  if (copy_funcs(tmp_table_param->items_to_copy, table->in_use))
+    return TRUE;

   for (uint i= 0; i < arg_count_field; i++)
   {
sql/log.h

@@ -394,10 +394,10 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
   /* Use this to start writing a new log file */
   void new_file();

   bool write(Log_event* event_info);   // binary log write
   bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event, bool incident);

   bool write_incident(THD *thd, bool lock);
   int  write_cache(IO_CACHE *cache, bool lock_log, bool flush_and_sync);
   void set_write_error(THD *thd);
   bool check_write_error(THD *thd);
sql/sql_select.cc

@@ -12723,7 +12723,9 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   if (!end_of_records)
   {
     copy_fields(&join->tmp_table_param);
-    copy_funcs(join->tmp_table_param.items_to_copy);
+    if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+      DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
+
     if (!join->having || join->having->val_int())
     {
       int error;

@@ -12813,7 +12815,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
       memcpy(table->record[0]+key_part->offset, group->buff, 1);
   }
   init_tmptable_sum_functions(join->sum_funcs);
-  copy_funcs(join->tmp_table_param.items_to_copy);
+  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+    DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */
   if ((error=table->file->ha_write_row(table->record[0])))
   {
     if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,

@@ -12848,7 +12851,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   init_tmptable_sum_functions(join->sum_funcs);
   copy_fields(&join->tmp_table_param);          // Groups are copied twice.
-  copy_funcs(join->tmp_table_param.items_to_copy);
+  if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+    DBUG_RETURN(NESTED_LOOP_ERROR);           /* purecov: inspected */

   if (!(error= table->file->ha_write_row(table->record[0])))
     join->send_records++;                       // New group

@@ -12935,7 +12939,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     if (idx < (int) join->send_group_parts)
     {
       copy_fields(&join->tmp_table_param);
-      copy_funcs(join->tmp_table_param.items_to_copy);
+      if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+        DBUG_RETURN(NESTED_LOOP_ERROR);
       if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
         DBUG_RETURN(NESTED_LOOP_ERROR);
       if (join->procedure)

@@ -15807,14 +15812,39 @@ update_sum_func(Item_sum **func_ptr)
   return 0;
 }

-/** Copy result of functions to record in tmp_table. */
+/**
+  Copy result of functions to record in tmp_table.
+
+  Uses the thread pointer to check for errors in
+  some of the val_xxx() methods called by the
+  save_in_result_field() function.
+  TODO: make the Item::val_xxx() return error code
+
+  @param func_ptr  array of the function Items to copy to the tmp table
+  @param thd       pointer to the current thread for error checking
+  @retval
+    FALSE if OK
+  @retval
+    TRUE  on error
+*/

-void
-copy_funcs(Item **func_ptr)
+bool
+copy_funcs(Item **func_ptr, const THD *thd)
 {
   Item *func;
   for (; (func = *func_ptr) ; func_ptr++)
+  {
     func->save_in_result_field(1);
+    /*
+      Need to check the THD error state because Item::val_xxx() don't
+      return error code, but can generate errors
+      TODO: change it for a real status check when Item::val_xxx()
+      are extended to return status code.
+    */
+    if (thd->is_error())
+      return TRUE;
+  }
+  return FALSE;
 }
sql/sql_select.h

@@ -606,7 +606,7 @@ bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
                        List<Item> &new_list1, List<Item> &new_list2,
                        uint elements, List<Item> &fields);
 void copy_fields(TMP_TABLE_PARAM *param);
-void copy_funcs(Item **func_ptr);
+bool copy_funcs(Item **func_ptr, const THD *thd);
 bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
                              int error, bool ignore_last_dupp_error);
 uint find_shortest_key(TABLE *table, const key_map *usable_keys);
sql/table.h

@@ -204,7 +204,6 @@ typedef struct st_order {
   struct st_order *next;
   Item   **item;                    /* Point at item in select fields */
   Item   *item_ptr;                 /* Storage for initial item */
-  Item   **item_copy;               /* For SPs; the original item ptr */
   int    counter;                   /* position in SELECT list, correct
                                        only if counter_used is true*/
   bool   asc;                       /* true if ascending */