Commit d80feb9e authored Dec 01, 2005 by timour@mysql.com
Merge mysql.com:/home/timka/mysql/src/5.0-virgin
into mysql.com:/home/timka/mysql/src/5.0-bug-14920
Parents: 1faac904 999a73ac
Showing 6 changed files with 112 additions and 12 deletions.
mysql-test/r/group_min_max.result   +31  -0
mysql-test/t/group_min_max.test     +12  -0
sql/sql_class.cc                     +1  -0
sql/sql_class.h                      +8  -1
sql/sql_select.cc                   +54 -11
sql/sql_select.h                     +6  -0
mysql-test/r/group_min_max.result
@@ -2012,3 +2012,34 @@ explain select distinct f1, f2 from t1;
 id select_type table type possible_keys key key_len ref rows Extra
 1 SIMPLE t1 range NULL PRIMARY 5 NULL 3 Using index for group-by; Using temporary
 drop table t1;
+create table t1 (c1 int not null,c2 int not null, primary key(c1,c2));
+insert into t1 (c1,c2) values
+(10,1),(10,2),(10,3),(20,4),(20,5),(20,6),(30,7),(30,8),(30,9);
+select distinct c1, c2 from t1 order by c2;
+c1 c2
+10 1
+10 2
+10 3
+20 4
+20 5
+20 6
+30 7
+30 8
+30 9
+select c1,min(c2) as c2 from t1 group by c1 order by c2;
+c1 c2
+10 1
+20 4
+30 7
+select c1,c2 from t1 group by c1,c2 order by c2;
+c1 c2
+10 1
+10 2
+10 3
+20 4
+20 5
+20 6
+30 7
+30 8
+30 9
+drop table t1;
mysql-test/t/group_min_max.test
@@ -703,3 +703,15 @@ alter table t1 drop primary key, add primary key (f2, f1);
 explain select distinct f1 a, f1 b from t1;
 explain select distinct f1, f2 from t1;
 drop table t1;
+
+#
+# Bug #14920 Ordering aggregated result sets with composite primary keys
+# corrupts resultset
+#
+create table t1 (c1 int not null,c2 int not null, primary key(c1,c2));
+insert into t1 (c1,c2) values
+(10,1),(10,2),(10,3),(20,4),(20,5),(20,6),(30,7),(30,8),(30,9);
+select distinct c1, c2 from t1 order by c2;
+select c1,min(c2) as c2 from t1 group by c1 order by c2;
+select c1,c2 from t1 group by c1,c2 order by c2;
+drop table t1;
sql/sql_class.cc
@@ -1815,6 +1815,7 @@ void TMP_TABLE_PARAM::init()
   group_parts= group_length= group_null_parts= 0;
   quick_group= 1;
   table_charset= 0;
+  precomputed_group_by= 0;
 }
sql/sql_class.h
@@ -1822,11 +1822,18 @@ public:
   uint  convert_blob_length;
   CHARSET_INFO *table_charset;
   bool schema_table;
+  /*
+    True if GROUP BY and its aggregate functions are already computed
+    by a table access method (e.g. by loose index scan). In this case
+    query execution should not perform aggregation and should treat
+    aggregate functions as normal functions.
+  */
+  bool precomputed_group_by;

   TMP_TABLE_PARAM()
     :copy_field(0), group_parts(0),
      group_length(0), group_null_parts(0), convert_blob_length(0),
-     schema_table(0)
+     schema_table(0), precomputed_group_by(0)
   {}
   ~TMP_TABLE_PARAM()
   {
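The comment above states the contract of the new flag: when the access method has already produced one row per group with the MIN/MAX values evaluated, execution should copy rows into the temporary table unchanged instead of aggregating them again. A minimal standalone sketch of that dispatch, using hypothetical names (TmpTableParams, choose_writer) rather than the server's types:

#include <cstdio>

/* Hypothetical stand-in for the relevant part of TMP_TABLE_PARAM. */
struct TmpTableParams { bool precomputed_group_by; };

typedef void (*write_func)();

static void write_row_as_is()      { std::puts("copy the row unchanged"); }
static void write_row_with_group() { std::puts("aggregate the row into its group"); }

/* Chosen once before the main execution loop, in the same spirit as
   setup_end_select_func() picking end_write vs end_write_group below. */
static write_func choose_writer(const TmpTableParams &p, bool sort_and_group)
{
  if (sort_and_group && !p.precomputed_group_by)
    return write_row_with_group;   /* executor still has to do the grouping */
  return write_row_as_is;          /* groups already formed by the access method */
}

int main()
{
  TmpTableParams loose_scan = { true };
  TmpTableParams plain_scan = { false };
  choose_writer(loose_scan, true)();   /* prints: copy the row unchanged */
  choose_writer(plain_scan, true)();   /* prints: aggregate the row into its group */
  return 0;
}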
sql/sql_select.cc
@@ -1007,6 +1007,20 @@ JOIN::optimize()
     }
     having= 0;

+    /*
+      The loose index scan access method guarantees that all grouping or
+      duplicate row elimination (for distinct) is already performed
+      during data retrieval, and that all MIN/MAX functions are already
+      computed for each group. Thus all MIN/MAX functions should be
+      treated as regular functions, and there is no need to perform
+      grouping in the main execution loop.
+      Notice that currently loose index scan is applicable only for
+      single table queries, thus it is sufficient to test only the first
+      join_tab element of the plan for its access method.
+    */
+    if (join_tab->is_using_loose_index_scan())
+      tmp_table_param.precomputed_group_by= TRUE;
+
     /* Create a tmp table if distinct or if the sort is too complicated */
     if (need_tmp)
     {
@@ -1410,6 +1424,15 @@ JOIN::exec()
       else
       {
         /* group data to new table */
+
+        /*
+          If the access method is loose index scan then all MIN/MAX
+          functions are precomputed, and should be treated as regular
+          functions. See extended comment in JOIN::exec.
+        */
+        if (curr_join->join_tab->is_using_loose_index_scan())
+          curr_join->tmp_table_param.precomputed_group_by= TRUE;
+
         if (!(curr_tmp_table=
               exec_tmp_table2= create_tmp_table(thd,
                                                 &curr_join->tmp_table_param,
@@ -8279,6 +8302,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
   MEM_ROOT *mem_root_save, own_root;
   TABLE *table;
   uint  i,field_count,null_count,null_pack_length;
+  uint  copy_func_count= param->func_count;
   uint  hidden_null_count, hidden_null_pack_length, hidden_field_count;
   uint  blob_count,group_null_items, string_count;
   uint  temp_pool_slot=MY_BIT_NONE;
@@ -8342,6 +8366,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
   field_count=param->field_count+param->func_count+param->sum_func_count;
   hidden_field_count=param->hidden_field_count;

+  /*
+    When loose index scan is employed as access method, it already
+    computes all groups and the result of all aggregate functions. We
+    make space for the items of the aggregate function in the list of
+    functions TMP_TABLE_PARAM::items_to_copy, so that the values of
+    these items are stored in the temporary table.
+  */
+  if (param->precomputed_group_by)
+    copy_func_count+= param->sum_func_count;
+
   init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0);

   if (!multi_alloc_root(&own_root,
@@ -8349,7 +8383,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
                         &reg_field, sizeof(Field*) * (field_count+1),
                         &blob_field, sizeof(uint)*(field_count+1),
                         &from_field, sizeof(Field*)*field_count,
-                        &copy_func, sizeof(*copy_func)*(param->func_count+1),
+                        &copy_func, sizeof(*copy_func)*(copy_func_count+1),
                         &param->keyinfo, sizeof(*param->keyinfo),
                         &key_part_info,
                         sizeof(*key_part_info)*(param->group_parts+1),
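The sizing rule just introduced is small but easy to get wrong: items_to_copy is a null-terminated pointer list, so when the precomputed aggregate items must ride along it needs room for func_count + sum_func_count entries plus the terminator. A standalone sketch of the arithmetic, with hypothetical names (CopyCounts, copy_list_slots) rather than the server's structures:

#include <cassert>
#include <cstddef>

/* Hypothetical mirror of the counters kept in TMP_TABLE_PARAM. */
struct CopyCounts { std::size_t func_count, sum_func_count; };

/* Slots to allocate for the null-terminated copy list.  When the access
   method has already computed the aggregates, their entries must fit in
   the same list, so room is reserved for them up front. */
static std::size_t copy_list_slots(const CopyCounts &c, bool precomputed_group_by)
{
  std::size_t copy_func_count = c.func_count;
  if (precomputed_group_by)
    copy_func_count += c.sum_func_count;
  return copy_func_count + 1;               /* +1 for the trailing null pointer */
}

int main()
{
  CopyCounts c = { 3, 2 };
  assert(copy_list_slots(c, false) == 4);   /* 3 items + terminator */
  assert(copy_list_slots(c, true)  == 6);   /* 3 + 2 aggregates + terminator */
  return 0;
}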
@@ -9241,11 +9275,13 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
 Next_select_func setup_end_select_func(JOIN *join)
 {
   TABLE *table= join->tmp_table;
+  TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param;
   Next_select_func end_select;
+
   /* Set up select_end */
   if (table)
   {
-    if (table->group && join->tmp_table_param.sum_func_count)
+    if (table->group && tmp_tbl->sum_func_count)
     {
       if (table->s->keys)
       {
@@ -9258,7 +9294,7 @@ Next_select_func setup_end_select_func(JOIN *join)
       end_select=end_unique_update;
       }
     }
-    else if (join->sort_and_group)
+    else if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
     {
       DBUG_PRINT("info",("Using end_write_group"));
       end_select=end_write_group;
@@ -9267,19 +9303,27 @@ Next_select_func setup_end_select_func(JOIN *join)
     {
       DBUG_PRINT("info",("Using end_write"));
       end_select=end_write;
+      if (tmp_tbl->precomputed_group_by)
+      {
+        /*
+          A preceding call to create_tmp_table in the case when loose
+          index scan is used guarantees that
+          TMP_TABLE_PARAM::items_to_copy has enough space for the group
+          by functions. It is OK here to use memcpy since we copy
+          Item_sum pointers into an array of Item pointers.
+        */
+        memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count, join->sum_funcs,
+               sizeof(Item*)*tmp_tbl->sum_func_count);
+        tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0;
+      }
     }
   }
   else
   {
-    /* Test if data is accessed via QUICK_GROUP_MIN_MAX_SELECT. */
-    bool is_using_quick_group_min_max_select=
-      (join->join_tab->select && join->join_tab->select->quick &&
-       (join->join_tab->select->quick->get_type() ==
-        QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));

     if ((join->sort_and_group ||
          (join->procedure && join->procedure->flags & PROC_GROUP)) &&
-        !is_using_quick_group_min_max_select)
+        !tmp_tbl->precomputed_group_by)
       end_select= end_send_group;
     else
       end_select= end_send;
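The memcpy above works because Item_sum derives from Item, so with single inheritance the Item_sum pointers can be block-copied into the tail of the Item pointer list and later invoked through base-class pointers. A self-contained sketch of that layout trick and of walking the null-terminated list afterwards, using a hypothetical two-class hierarchy rather than the server's Item classes:

#include <cstdio>
#include <cstring>

/* Hypothetical stand-ins for Item / Item_sum. */
struct Item     { virtual void copy() { std::puts("copy plain item"); } virtual ~Item() {} };
struct Item_sum : Item { void copy() { std::puts("copy precomputed aggregate"); } };

int main()
{
  const int func_count= 2, sum_func_count= 2;

  /* Null-terminated copy list sized for both kinds of items, mirroring
     copy_func_count= func_count + sum_func_count in create_tmp_table(). */
  Item *items_to_copy[func_count + sum_func_count + 1];

  Item      plain[func_count];
  Item_sum  sums[sum_func_count];
  Item_sum *sum_funcs[sum_func_count]= { &sums[0], &sums[1] };

  for (int i= 0; i < func_count; i++)
    items_to_copy[i]= &plain[i];

  /*
    Block-copying Item_sum* values into Item* slots is fine here because,
    with single inheritance, the derived-to-base pointer conversion does
    not change the address; the same assumption the server comment states.
  */
  std::memcpy(items_to_copy + func_count, sum_funcs, sizeof(Item*) * sum_func_count);
  items_to_copy[func_count + sum_func_count]= 0;   /* keep the terminator */

  /* What the copy step does later: walk the list up to the null pointer. */
  for (Item **item= items_to_copy; *item; item++)
    (*item)->copy();
  return 0;
}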
@@ -10553,7 +10597,6 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   {
     copy_fields(&join->tmp_table_param);
     copy_funcs(join->tmp_table_param.items_to_copy);
 #ifdef TO_BE_DELETED
     if (!table->uniques)                        // If not unique handling
     {
sql/sql_select.h
@@ -140,6 +140,12 @@ typedef struct st_join_table {
   nested_join_map embedding_map;

   void cleanup();
+  inline bool is_using_loose_index_scan()
+  {
+    return (select && select->quick &&
+            (select->quick->get_type() ==
+             QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
+  }
 } JOIN_TAB;

 enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool
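The new JOIN_TAB helper is a chain of guarded dereferences: each && must hold before the next pointer is touched, so the call is safe even when no quick select was set up for the table. A tiny sketch of the same guard pattern with hypothetical types (Quick, Select, JoinTab), not the server's classes:

#include <cstdio>

enum QuickType { QS_TYPE_RANGE, QS_TYPE_GROUP_MIN_MAX };

struct Quick  { QuickType type; };
struct Select { Quick *quick; };
struct JoinTab
{
  Select *select;
  /* Short-circuit evaluation guarantees select and quick are non-null
     before they are dereferenced, mirroring is_using_loose_index_scan(). */
  bool is_using_loose_index_scan() const
  {
    return select && select->quick &&
           select->quick->type == QS_TYPE_GROUP_MIN_MAX;
  }
};

int main()
{
  Quick   g = { QS_TYPE_GROUP_MIN_MAX };
  Select  s = { &g };
  JoinTab with_loose_scan = { &s };
  JoinTab full_scan = { 0 };                 /* no select at all: still safe */
  std::printf("%d %d\n", with_loose_scan.is_using_loose_index_scan(),
              full_scan.is_using_loose_index_scan());   /* prints: 1 0 */
  return 0;
}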