Commit 69dd773b, authored Sep 13, 2010 by Igor Babaev
An implementation of index intersect via a modified Unique class.
This code is planned to be used for mwl#21.
Parent: 5719ca5c
Showing 9 changed files with 598 additions and 76 deletions (+598 −76)
include/my_tree.h    +1   −0
mysys/tree.c         +2   −0
sql/filesort.cc      +82  −31
sql/opt_range.cc     +391 −29
sql/opt_range.h      +55  −6
sql/sql_class.h      +13  −1
sql/sql_select.cc    +5   −1
sql/sql_sort.h       +6   −0
sql/uniques.cc       +43  −8
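The patch implements an index_merge "sort-intersect" strategy: every chosen range scan feeds the rowids it matches into one Unique object, which now counts how many scans produced each rowid, and only rowids whose count equals the number of merged scans (i.e. rows matched by every scan) are kept and used to fetch table rows. The following standalone C++ sketch illustrates that counting idea only; the names and containers are illustrative and are not the MariaDB classes touched by this commit.

    // Illustrative sketch only (not MariaDB code): intersect rowid streams by
    // counting how many scans produced each rowid -- the idea behind the
    // modified Unique class in this commit.
    #include <cstdint>
    #include <map>
    #include <vector>
    #include <iostream>

    std::vector<uint64_t>
    index_intersect(const std::vector<std::vector<uint64_t>> &scans)
    {
      std::map<uint64_t, unsigned> dupl_count;   // rowid -> number of scans that saw it
      for (const auto &scan : scans)
        for (uint64_t rowid : scan)
          ++dupl_count[rowid];

      std::vector<uint64_t> result;
      const unsigned min_dupl_count= scans.size();  // must appear in every scan
      for (const auto &p : dupl_count)
        if (p.second >= min_dupl_count)
          result.push_back(p.first);             // rowids come out sorted, like Unique::get()
      return result;
    }

    int main()
    {
      std::vector<std::vector<uint64_t>> scans= { {1, 4, 7, 9}, {2, 4, 9}, {4, 8, 9} };
      for (uint64_t rowid : index_intersect(scans))
        std::cout << rowid << '\n';              // prints 4 and 9
    }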
include/my_tree.h
@@ -31,6 +31,7 @@ extern "C" {
 #define tree_set_pointer(element,ptr) *((uchar **) (element+1))=((uchar*) (ptr))

 #define TREE_NO_DUPS 1
+#define TREE_ONLY_DUPS 2

 typedef enum { left_root_right, right_root_left } TREE_WALK;
 typedef uint32 element_count;
mysys/tree.c
@@ -221,6 +221,8 @@ TREE_ELEMENT *tree_insert(TREE *tree, void *key, uint key_size,
   }
   if (element == &tree->null_element)
   {
+    if (tree->flag & TREE_ONLY_DUPS)
+      return ((TREE_ELEMENT *) 1);
     uint alloc_size= sizeof(TREE_ELEMENT)+key_size+tree->size_of_element;
     tree->allocated+= alloc_size;
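The new TREE_ONLY_DUPS flag makes tree_insert() refuse to create elements for keys that are not already in the tree (it returns a dummy non-NULL pointer so the caller does not mistake the refusal for out-of-memory) while still incrementing the counter of keys that are present. Unique::close_for_expansion() (see sql_class.h below) sets the flag once the first merged scan has been consumed, so later scans can only raise duplicate counts. A hedged, standalone sketch of that behaviour, not the my_tree.h API:

    // Illustrative sketch (not the my_tree.h API): a counting set that, once
    // "closed for expansion", only counts keys it already contains -- the
    // effect TREE_ONLY_DUPS gives the Unique tree after the first index scan.
    #include <cstdint>
    #include <map>

    class CountingSet
    {
      std::map<uint64_t, unsigned> counts;
      bool only_dups= false;                 // corresponds to TREE_ONLY_DUPS
    public:
      void close_for_expansion() { only_dups= true; }
      bool insert(uint64_t key)
      {
        auto it= counts.find(key);
        if (it != counts.end())
        { ++it->second; return true; }       // existing key: always counted
        if (only_dups)
          return true;                       // new key rejected, but not an error
        counts.emplace(key, 1);              // still expanding: add the key
        return true;
      }
      unsigned count(uint64_t key) const
      {
        auto it= counts.find(key);
        return it == counts.end() ? 0 : it->second;
      }
    };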
sql/filesort.cc
@@ -50,10 +50,6 @@ static int write_keys(SORTPARAM *param,uchar * *sort_keys,
                       uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
 static void make_sortkey(SORTPARAM *param,uchar *to, uchar *ref_pos);
 static void register_used_fields(SORTPARAM *param);
-static int merge_index(SORTPARAM *param, uchar *sort_buffer,
-                       BUFFPEK *buffpek,
-                       uint maxbuffer, IO_CACHE *tempfile,
-                       IO_CACHE *outfile);
 static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count,
                        FILESORT_INFO *table_sort);
 static uint suffix_length(ulong string_length);

@@ -145,6 +141,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
   /* filesort cannot handle zero-length records. */
   DBUG_ASSERT(param.sort_length);
   param.ref_length= table->file->ref_length;
+  param.min_dupl_count= 0;
   param.addon_field= 0;
   param.addon_length= 0;
   if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&

@@ -1216,7 +1213,13 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
   rec_length= param->rec_length;
   res_length= param->res_length;
   sort_length= param->sort_length;
-  offset= rec_length-res_length;
+  element_count dupl_count;
+  uchar *src;
+  uint dupl_count_ofs= rec_length-sizeof(element_count);
+  uint min_dupl_count= param->min_dupl_count;
+  offset= rec_length-
+          (flag && min_dupl_count ? sizeof(dupl_count) : 0)-res_length;
+  uint wr_len= flag ? res_length : rec_length;
   maxcount= (ulong) (param->keys/((uint) (Tb-Fb) +1));
   to_start_filepos= my_b_tell(to_file);
   strpos= sort_buffer;

@@ -1262,16 +1265,20 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
     */
     buffpek= (BUFFPEK*) queue_top(&queue);
     memcpy(param->unique_buff, buffpek->key, rec_length);
-    if (my_b_write(to_file, (uchar*) buffpek->key, rec_length))
-    {
-      error=1; goto err;                        /* purecov: inspected */
-    }
+    if (min_dupl_count)
+      memcpy(&dupl_count, param->unique_buff+dupl_count_ofs,
+             sizeof(dupl_count));
     buffpek->key+= rec_length;
-    buffpek->mem_count--;
-    if (!--max_rows)
+    if (! --buffpek->mem_count)
     {
-      error= 0;                                       /* purecov: inspected */
-      goto end;                                       /* purecov: inspected */
+      if (!(error= (int) read_to_buffer(from_file, buffpek,
+                                        rec_length)))
+      {
+        VOID(queue_remove(&queue,0));
+        reuse_freed_buff(&queue, buffpek, rec_length);
+      }
+      else if (error == -1)
+        goto err;                        /* purecov: inspected */
     }
     queue_replaced(&queue);              // Top element has been used
   }

@@ -1287,27 +1294,42 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
     for (;;)
     {
       buffpek= (BUFFPEK*) queue_top(&queue);
+      src= buffpek->key;
       if (cmp)                                        // Remove duplicates
       {
         if (!(*cmp)(first_cmp_arg, &(param->unique_buff),
                     (uchar**) &buffpek->key))
-          goto skip_duplicate;
-        memcpy(param->unique_buff, (uchar*) buffpek->key, rec_length);
-      }
-      if (flag == 0)
-      {
-        if (my_b_write(to_file,(uchar*) buffpek->key, rec_length))
         {
-          error=1; goto err;                        /* purecov: inspected */
+          if (min_dupl_count)
+          {
+            element_count cnt;
+            memcpy(&cnt, (uchar *) buffpek->key+dupl_count_ofs, sizeof(cnt));
+            dupl_count+= cnt;
+          }
+          goto skip_duplicate;
         }
+        if (min_dupl_count)
+        {
+          memcpy(param->unique_buff+dupl_count_ofs, &dupl_count,
+                 sizeof(dupl_count));
+        }
+        src= param->unique_buff;
       }
-      else
+
+      if (!flag || !min_dupl_count || dupl_count >= min_dupl_count)
       {
-        if (my_b_write(to_file, (uchar*) buffpek->key+offset, res_length))
+        if (my_b_write(to_file, src+(flag ? offset : 0), wr_len))
         {
           error=1; goto err;                        /* purecov: inspected */
         }
       }
+      if (cmp)
+      {
+        memcpy(param->unique_buff, (uchar*) buffpek->key, rec_length);
+        if (min_dupl_count)
+          memcpy(&dupl_count, param->unique_buff+dupl_count_ofs,
+                 sizeof(dupl_count));
+      }
       if (!--max_rows)
       {
         error= 0;                                       /* purecov: inspected */

@@ -1343,9 +1365,33 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
   {
     if (!(*cmp)(first_cmp_arg, &(param->unique_buff),
                 (uchar**) &buffpek->key))
     {
-      buffpek->key+= rec_length;         // Remove duplicate
+      if (min_dupl_count)
+      {
+        element_count cnt;
+        memcpy(&cnt, (uchar *) buffpek->key+dupl_count_ofs, sizeof(cnt));
+        dupl_count+= cnt;
+      }
+      buffpek->key+= rec_length;
       --buffpek->mem_count;
     }
+
+    if (min_dupl_count)
+      memcpy(param->unique_buff+dupl_count_ofs, &dupl_count,
+             sizeof(dupl_count));
+
+    if (!flag || !min_dupl_count || dupl_count >= min_dupl_count)
+    {
+      src= param->unique_buff;
+      if (my_b_write(to_file, src+(flag ? offset : 0), wr_len))
+      {
+        error=1; goto err;                        /* purecov: inspected */
+      }
+      if (!--max_rows)
+      {
+        error= 0;
+        goto end;
+      }
+    }
   }

   do

@@ -1367,12 +1413,17 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
   else
   {
     register uchar *end;
-    strpos= buffpek->key+offset;
-    for (end= strpos+buffpek->mem_count*rec_length ;
-         strpos != end ;
-         strpos+= rec_length)
+    src= buffpek->key+offset;
+    for (end= src+buffpek->mem_count*rec_length ;
+         src != end ;
+         src+= rec_length)
     {
-      if (my_b_write(to_file, strpos, res_length))
+      if (flag && min_dupl_count &&
+          memcmp(&min_dupl_count, src+dupl_count_ofs,
+                 sizeof(dupl_count_ofs))<0)
+        continue;
+      if (my_b_write(to_file, src, wr_len))
       {
         error=1; goto err;
       }
     }

@@ -1393,7 +1444,7 @@ err:
 /* Do a merge to output-file (save only positions) */

-static int merge_index(SORTPARAM *param, uchar *sort_buffer,
+int merge_index(SORTPARAM *param, uchar *sort_buffer,
                        BUFFPEK *buffpek, uint maxbuffer,
                        IO_CACHE *tempfile, IO_CACHE *outfile)
 {
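merge_buffers() now optionally carries an element_count duplicate counter at the end of each record: while merging sorted runs it sums the counters of equal keys, and when the caller sets both flag and SORTPARAM::min_dupl_count it writes a key to the output only once its accumulated count reaches min_dupl_count. A standalone sketch of that merge-and-filter step under simplified assumptions (plain integers instead of packed uchar records):

    // Hedged sketch of the merge step: sum the duplicate counts of equal keys
    // across sorted runs and keep only keys whose total reaches
    // min_dupl_count. Standalone illustration, not the filesort.cc code.
    #include <cstdint>
    #include <map>
    #include <vector>

    struct Rec { uint64_t key; unsigned dupl_count; };

    std::vector<Rec> merge_with_counts(const std::vector<std::vector<Rec>> &runs,
                                       unsigned min_dupl_count)
    {
      std::map<uint64_t, unsigned> merged;        // stands in for the merge queue
      for (const auto &run : runs)
        for (const Rec &r : run)
          merged[r.key]+= r.dupl_count;           // equal keys: counts are summed

      std::vector<Rec> out;
      for (const auto &p : merged)
        if (!min_dupl_count || p.second >= min_dupl_count)
          out.push_back({p.first, p.second});     // below-threshold keys are dropped
      return out;
    }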
sql/opt_range.cc
@@ -697,6 +697,9 @@ public:
   key_map ror_scans_map;   /* bitmask of ROR scan-able elements in keys */
   uint    n_ror_scans;     /* number of set bits in ror_scans_map */

+  struct st_index_scan_info **index_scans;     /* list of index scans */
+  struct st_index_scan_info **index_scans_end; /* last index scan */
+
   struct st_ror_scan_info **ror_scans;     /* list of ROR key scans */
   struct st_ror_scan_info **ror_scans_end; /* last ROR scan */
   /* Note that #records for each key scan is stored in table->quick_rows */

@@ -776,9 +779,11 @@ class TABLE_READ_PLAN;
 class TRP_RANGE;
 class TRP_ROR_INTERSECT;
 class TRP_ROR_UNION;
+class TRP_INDEX_INTERSECT;
 class TRP_INDEX_MERGE;
 class TRP_GROUP_MIN_MAX;

+struct st_index_scan_info;
 struct st_ror_scan_info;

 static SEL_TREE * get_mm_parts(RANGE_OPT_PARAM *param, COND *cond_func, Field *field,

@@ -804,6 +809,9 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
                                        bool update_tbl_stats,
                                        double read_time);
 static
+TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
+                                              double read_time);
+static
 TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
                                           double read_time,
                                           bool *are_all_covering);

@@ -1742,7 +1750,7 @@ int QUICK_INDEX_MERGE_SELECT::init()
 int QUICK_INDEX_MERGE_SELECT::reset()
 {
   DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::reset");
   DBUG_RETURN(read_keys_and_merge());
 }

 bool

@@ -1778,6 +1786,64 @@ QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT()
   DBUG_VOID_RETURN;
 }

+QUICK_INDEX_INTERSECT_SELECT::QUICK_INDEX_INTERSECT_SELECT(THD *thd_param,
+                                                           TABLE *table)
+  :pk_quick_select(NULL), thd(thd_param)
+{
+  DBUG_ENTER("QUICK_INDEX_INTERSECT_SELECT::QUICK_INDEX_INTERSECT_SELECT");
+  index= MAX_KEY;
+  head= table;
+  bzero(&read_record, sizeof(read_record));
+  init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+  DBUG_VOID_RETURN;
+}
+
+int QUICK_INDEX_INTERSECT_SELECT::init()
+{
+  DBUG_ENTER("QUICK_INDEX_INTERSECT_SELECT::init");
+  DBUG_RETURN(0);
+}
+
+int QUICK_INDEX_INTERSECT_SELECT::reset()
+{
+  DBUG_ENTER("QUICK_INDEX_INTERSECT_SELECT::reset");
+  DBUG_RETURN(read_keys_and_merge());
+}
+
+bool
+QUICK_INDEX_INTERSECT_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick_sel_range)
+{
+  /*
+    Save quick_select that does scan on clustered primary key as it will be
+    processed separately.
+  */
+  if (head->file->primary_key_is_clustered() &&
+      quick_sel_range->index == head->s->primary_key)
+    pk_quick_select= quick_sel_range;
+  else
+    return quick_selects.push_back(quick_sel_range);
+  return 0;
+}
+
+QUICK_INDEX_INTERSECT_SELECT::~QUICK_INDEX_INTERSECT_SELECT()
+{
+  List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects);
+  QUICK_RANGE_SELECT* quick;
+  DBUG_ENTER("QUICK_INDEX_INTERSECT_SELECT::~QUICK_INDEX_INTERSECT_SELECT");
+  delete unique;
+  quick_it.rewind();
+  while ((quick= quick_it++))
+    quick->file= NULL;
+  quick_selects.delete_elements();
+  delete pk_quick_select;
+  /* It's ok to call the next two even if they are already deinitialized */
+  end_read_record(&read_record);
+  free_io_cache(head);
+  free_root(&alloc,MYF(0));
+  DBUG_VOID_RETURN;
+}
+
 QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param,
                                                        TABLE *table,

@@ -2558,6 +2624,24 @@ public:
 };

+/*
+  Plan for QUICK_INDEX_INTERSECT_SELECT scan.
+  QUICK_INDEX_INTERSECT_SELECT always retrieves full rows, so retrieve_full_rows
+  is ignored by make_quick.
+*/
+
+class TRP_INDEX_INTERSECT : public TABLE_READ_PLAN
+{
+public:
+  TRP_INDEX_INTERSECT() {}                     /* Remove gcc warning */
+  virtual ~TRP_INDEX_INTERSECT() {}            /* Remove gcc warning */
+  QUICK_SELECT_I *make_quick(PARAM *param,
+                             bool retrieve_full_rows,
+                             MEM_ROOT *parent_alloc);
+  TRP_RANGE **range_scans;     /* array of ptrs to plans of merged scans */
+  TRP_RANGE **range_scans_end; /* end of the array */
+};
+
 /*
   Plan for QUICK_INDEX_MERGE_SELECT scan.
   QUICK_ROR_INTERSECT_SELECT always retrieves full rows, so retrieve_full_rows

@@ -2625,6 +2709,30 @@ public:
 };

+typedef struct st_index_scan_info
+{
+  uint      idx;      /* # of used key in param->keys */
+  uint      keynr;    /* # of used key in table */
+  uint      range_count;
+  ha_rows   records;  /* estimate of # records this scan will return */
+
+  /* Set of intervals over key fields that will be used for row retrieval. */
+  SEL_ARG   *sel_arg;
+
+  /* Fields used in the query and covered by this ROR scan. */
+  MY_BITMAP covered_fields;
+  uint      used_fields_covered; /* # of set bits in covered_fields */
+  int       key_rec_length; /* length of key record (including rowid) */
+
+  /*
+    Cost of reading all index records with values in sel_arg intervals set
+    (assuming there is no need to access full table records)
+  */
+  double    index_read_cost;
+  uint      first_uncovered_field; /* first unused bit in covered_fields */
+  uint      key_components; /* # of parts in the key */
+} INDEX_SCAN_INFO;
+
 /*
   Fill param->needed_fields with bitmap of fields used in the query.
   SYNOPSIS

@@ -2903,6 +3011,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
       */
       TRP_RANGE         *range_trp;
       TRP_ROR_INTERSECT *rori_trp;
+      TRP_INDEX_INTERSECT *intersect_trp;
       bool can_build_covering= FALSE;

       remove_nonrange_trees(&param, tree);

@@ -2942,6 +3051,18 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
           best_trp= rori_trp;
         }
       }
+#if 1
+#else
+      if (optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE))
+      {
+        if ((intersect_trp= get_best_index_intersect(&param, tree,
+                                                     best_read_time)))
+        {
+          best_trp= intersect_trp;
+          best_read_time= best_trp->read_cost;
+        }
+      }
+#endif
       if (optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE))
       {

@@ -4605,6 +4726,85 @@ TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge,
   DBUG_RETURN(trp);
 }

+static
+TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
+                                              double read_time)
+{
+  uint i;
+  uint unique_calc_buff_size;
+  TRP_RANGE **cur_range;
+  TRP_RANGE **range_scans;
+  TRP_INDEX_INTERSECT *intersect_trp= NULL;
+  double intersect_cost= 0.0;
+  ha_rows scan_records= 0;
+  double selectivity= 1.0;
+  ha_rows table_records= param->table->file->stats.records;
+  uint n_index_scans= tree->index_scans_end - tree->index_scans;
+  DBUG_ENTER("get_best_index_intersect");
+
+  if (!n_index_scans)
+    DBUG_RETURN(NULL);
+
+  if (!(range_scans= (TRP_RANGE**) alloc_root(param->mem_root,
+                                              sizeof(TRP_RANGE *)*
+                                              n_index_scans)))
+    DBUG_RETURN(NULL);
+
+  for (i= 0, cur_range= range_scans; i < n_index_scans; i++)
+  {
+    struct st_index_scan_info *index_scan= tree->index_scans[i];
+    if ((*cur_range= new (param->mem_root) TRP_RANGE(index_scan->sel_arg,
+                                                     index_scan->idx)))
+    {
+      TRP_RANGE *trp= *cur_range;
+      trp->records= index_scan->records;
+      trp->is_ror= FALSE;
+      trp->read_cost= get_index_only_read_time(param, index_scan->records,
+                                               index_scan->keynr);
+      scan_records+= trp->records;
+      selectivity*= (double) trp->records/table_records;
+      intersect_cost+= trp->read_cost;
+      cur_range++;
+    }
+  }
+
+  /* Add Unique operations cost */
+  unique_calc_buff_size=
+    Unique::get_cost_calc_buff_size((ulong)scan_records,
+                                    param->table->file->ref_length,
+                                    param->thd->variables.sortbuff_size);
+  if (param->imerge_cost_buff_size < unique_calc_buff_size)
+  {
+    if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root,
+                                                     unique_calc_buff_size)))
+      DBUG_RETURN(NULL);
+    param->imerge_cost_buff_size= unique_calc_buff_size;
+  }
+
+  intersect_cost+=
+    Unique::get_use_cost(param->imerge_cost_buff, scan_records,
+                         param->table->file->ref_length,
+                         param->thd->variables.sortbuff_size);
+
+  intersect_cost+= get_sweep_read_cost(param,
+                                       (ha_rows)(table_records*selectivity));
+
+  if (intersect_cost < read_time)
+  {
+    if ((intersect_trp= new (param->mem_root)TRP_INDEX_INTERSECT))
+    {
+      intersect_trp->read_cost= intersect_cost;
+      intersect_trp->records= (ha_rows) table_records*selectivity;
+      set_if_bigger(intersect_trp->records, 1);
+      intersect_trp->range_scans= range_scans;
+      intersect_trp->range_scans_end= cur_range;
+      read_time= intersect_cost;
+    }
+  }
+  DBUG_RETURN(intersect_trp);
+}
+
 /*
   Calculate cost of 'index only' scan for given index and number of records.

@@ -4642,27 +4842,8 @@ static double get_index_only_read_time(const PARAM* param, ha_rows records,
 }

-typedef struct st_ror_scan_info
+typedef struct st_ror_scan_info : INDEX_SCAN_INFO
 {
-  uint      idx;      /* # of used key in param->keys */
-  uint      keynr;    /* # of used key in table */
-  ha_rows   records;  /* estimate of # records this scan will return */
-
-  /* Set of intervals over key fields that will be used for row retrieval. */
-  SEL_ARG   *sel_arg;
-
-  /* Fields used in the query and covered by this ROR scan. */
-  MY_BITMAP covered_fields;
-  uint      used_fields_covered; /* # of set bits in covered_fields */
-  int       key_rec_length; /* length of key record (including rowid) */
-
-  /*
-    Cost of reading all index records with values in sel_arg intervals set
-    (assuming there is no need to access full table records)
-  */
-  double    index_read_cost;
-  uint      first_uncovered_field; /* first unused bit in covered_fields */
-  uint      key_components; /* # of parts in the key */
 } ROR_SCAN_INFO;

@@ -5521,6 +5702,14 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
                                       "tree scans"););
   tree->ror_scans_map.clear_all();
   tree->n_ror_scans= 0;
+  tree->index_scans= 0;
+  if (!tree->keys_map.is_clear_all())
+  {
+    tree->index_scans=
+      (INDEX_SCAN_INFO **) alloc_root(param->mem_root,
+                                      sizeof(INDEX_SCAN_INFO *) * param->keys);
+  }
+  tree->index_scans_end= tree->index_scans;
   for (idx= 0,key=tree->keys, end=key+param->keys;
        key != end;
        key++,idx++)

@@ -5529,6 +5718,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
     double found_read_time;
     if (*key)
     {
+      INDEX_SCAN_INFO *index_scan;
       uint keynr= param->real_keynr[idx];
       if ((*key)->type == SEL_ARG::MAYBE_KEY ||
           (*key)->maybe_flag)

@@ -5538,6 +5728,17 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
         (bool) param->table->covering_keys.is_set(keynr);
       found_records= check_quick_select(param, idx, *key, update_tbl_stats);
+      if (found_records != HA_POS_ERROR && tree->index_scans &&
+          (index_scan= (INDEX_SCAN_INFO *)alloc_root(param->mem_root,
+                                                     sizeof(INDEX_SCAN_INFO))))
+      {
+        index_scan->idx= idx;
+        index_scan->keynr= keynr;
+        index_scan->range_count= param->range_count;
+        index_scan->records= found_records;
+        index_scan->sel_arg= *key;
+        *tree->index_scans_end++= index_scan;
+      }
       if (param->is_ror_scan)
       {
         tree->n_ror_scans++;

@@ -5632,6 +5833,34 @@ QUICK_SELECT_I *TRP_INDEX_MERGE::make_quick(PARAM *param,
   return quick_imerge;
 }

+QUICK_SELECT_I *TRP_INDEX_INTERSECT::make_quick(PARAM *param,
+                                                bool retrieve_full_rows,
+                                                MEM_ROOT *parent_alloc)
+{
+  QUICK_INDEX_INTERSECT_SELECT *quick_intersect;
+  QUICK_RANGE_SELECT *quick;
+  /* index_merge always retrieves full rows, ignore retrieve_full_rows */
+  if (!(quick_intersect= new QUICK_INDEX_INTERSECT_SELECT(param->thd,
+                                                          param->table)))
+    return NULL;
+
+  quick_intersect->records= records;
+  quick_intersect->read_time= read_cost;
+  for (TRP_RANGE **range_scan= range_scans; range_scan != range_scans_end;
+       range_scan++)
+  {
+    if (!(quick= (QUICK_RANGE_SELECT*)
+          ((*range_scan)->make_quick(param, FALSE, &quick_intersect->alloc)))||
+        quick_intersect->push_quick_back(quick))
+    {
+      delete quick;
+      delete quick_intersect;
+      return NULL;
+    }
+  }
+  return quick_intersect;
+}
+
 QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param,
                                               bool retrieve_full_rows,
                                               MEM_ROOT *parent_alloc)

@@ -9025,6 +9254,18 @@ bool QUICK_INDEX_MERGE_SELECT::is_keys_used(const MY_BITMAP *fields)
   return 0;
 }

+bool QUICK_INDEX_INTERSECT_SELECT::is_keys_used(const MY_BITMAP *fields)
+{
+  QUICK_RANGE_SELECT *quick;
+  List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
+  while ((quick= it++))
+  {
+    if (is_key_used(head, quick->index, fields))
+      return 1;
+  }
+  return 0;
+}
+
 bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(const MY_BITMAP *fields)
 {
   QUICK_RANGE_SELECT *quick;

@@ -9170,13 +9411,20 @@ err:
     other error
 */

-int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
+int read_keys_and_merge_scans(THD *thd,
+                              TABLE *head,
+                              List<QUICK_RANGE_SELECT> quick_selects,
+                              QUICK_RANGE_SELECT *pk_quick_select,
+                              READ_RECORD *read_record,
+                              bool intersection,
+                              Unique **unique_ptr)
 {
   List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it(quick_selects);
   QUICK_RANGE_SELECT* cur_quick;
   int result;
+  Unique *unique= *unique_ptr;
   handler *file= head->file;
-  DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
+  DBUG_ENTER("read_keys_and_merge");

   /* We're going to just read rowids. */
   if (!head->key_read)

@@ -9187,6 +9435,7 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
   cur_quick_it.rewind();
   cur_quick= cur_quick_it++;
+  bool first_quick= TRUE;
   DBUG_ASSERT(cur_quick != 0);

   /*

@@ -9204,9 +9453,11 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
     unique= new Unique(refpos_order_cmp, (void *)file,
                        file->ref_length,
-                       thd->variables.sortbuff_size);
+                       thd->variables.sortbuff_size,
+                       intersection ? quick_selects.elements : 0);
     if (!unique)
       goto err;
+    *unique_ptr= unique;
   }
   else
     unique->reset();

@@ -9218,6 +9469,12 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
   {
     while ((result= cur_quick->get_next()) == HA_ERR_END_OF_FILE)
     {
+      if (first_quick)
+      {
+        first_quick= FALSE;
+        if (intersection && unique->is_in_memory())
+          unique->close_for_expansion();
+      }
       cur_quick->range_end();
       cur_quick= cur_quick_it++;
       if (!cur_quick)

@@ -9257,12 +9514,11 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
     sequence.
   */
   result= unique->get(head);
-  doing_pk_scan= FALSE;
   /*
     index_merge currently doesn't support "using index" at all
   */
   head->disable_keyread();
-  init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE);
+  init_read_record(read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE);
   DBUG_RETURN(result);

 err:

@@ -9271,6 +9527,17 @@ err:
 }

+int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
+{
+  int result;
+  DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
+  result= read_keys_and_merge_scans(thd, head, quick_selects, pk_quick_select,
+                                    &read_record, FALSE, &unique);
+  doing_pk_scan= FALSE;
+  DBUG_RETURN(result);
+}
+
 /*
   Get next row for index_merge.
   NOTES

@@ -9307,6 +9574,44 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
   DBUG_RETURN(result);
 }

+int QUICK_INDEX_INTERSECT_SELECT::read_keys_and_merge()
+{
+  int result;
+  DBUG_ENTER("QUICK_INDEX_INTERSECT_SELECT::read_keys_and_merge");
+  result= read_keys_and_merge_scans(thd, head, quick_selects, pk_quick_select,
+                                    &read_record, TRUE, &unique);
+  doing_pk_scan= FALSE;
+  DBUG_RETURN(result);
+}
+
+int QUICK_INDEX_INTERSECT_SELECT::get_next()
+{
+  int result;
+  DBUG_ENTER("QUICK_INDEX_INTERSECT_SELECT::get_next");
+
+  if (doing_pk_scan)
+    DBUG_RETURN(pk_quick_select->get_next());
+
+  if ((result= read_record.read_record(&read_record)) == -1)
+  {
+    result= HA_ERR_END_OF_FILE;
+    end_read_record(&read_record);
+    free_io_cache(head);
+    /* All rows from Unique have been retrieved, do a clustered PK scan */
+    if (pk_quick_select)
+    {
+      doing_pk_scan= TRUE;
+      if ((result= pk_quick_select->init()) ||
+          (result= pk_quick_select->reset()))
+        DBUG_RETURN(result);
+      DBUG_RETURN(pk_quick_select->get_next());
+    }
+  }
+
+  DBUG_RETURN(result);
+}
+
 /*
   Retrieve next record.

@@ -10010,6 +10315,28 @@ void QUICK_INDEX_MERGE_SELECT::add_info_string(String *str)
   str->append(')');
 }

+void QUICK_INDEX_INTERSECT_SELECT::add_info_string(String *str)
+{
+  QUICK_RANGE_SELECT *quick;
+  bool first= TRUE;
+  List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
+  str->append(STRING_WITH_LEN("sort_intersect("));
+  while ((quick= it++))
+  {
+    if (!first)
+      str->append(',');
+    else
+      first= FALSE;
+    quick->add_info_string(str);
+  }
+  if (pk_quick_select)
+  {
+    str->append(',');
+    pk_quick_select->add_info_string(str);
+  }
+  str->append(')');
+}
+
 void QUICK_ROR_INTERSECT_SELECT::add_info_string(String *str)
 {
   bool first= TRUE;

@@ -10034,6 +10361,7 @@ void QUICK_ROR_INTERSECT_SELECT::add_info_string(String *str)
   str->append(')');
 }
+
 void QUICK_ROR_UNION_SELECT::add_info_string(String *str)
 {
   bool first= TRUE;

@@ -10063,8 +10391,12 @@ void QUICK_RANGE_SELECT::add_keys_and_lengths(String *key_names,
   used_lengths->append(buf, length);
 }

-void QUICK_INDEX_MERGE_SELECT::add_keys_and_lengths(String *key_names,
-                                                    String *used_lengths)
+static
+void add_keys_and_lengths_of_index_scans(TABLE *head,
+                                         List<QUICK_RANGE_SELECT> quick_selects,
+                                         QUICK_RANGE_SELECT *pk_quick_select,
+                                         String *key_names,
+                                         String *used_lengths)
 {
   char buf[64];
   uint length;

@@ -10098,6 +10430,20 @@ void QUICK_INDEX_MERGE_SELECT::add_keys_and_lengths(String *key_names,
   }
 }

+void QUICK_INDEX_MERGE_SELECT::add_keys_and_lengths(String *key_names,
+                                                    String *used_lengths)
+{
+  add_keys_and_lengths_of_index_scans(head, quick_selects, pk_quick_select,
+                                      key_names, used_lengths);
+}
+
+void QUICK_INDEX_INTERSECT_SELECT::add_keys_and_lengths(String *key_names,
+                                                        String *used_lengths)
+{
+  add_keys_and_lengths_of_index_scans(head, quick_selects, pk_quick_select,
+                                      key_names, used_lengths);
+}
+
 void QUICK_ROR_INTERSECT_SELECT::add_keys_and_lengths(String *key_names,
                                                       String *used_lengths)
 {

@@ -12469,6 +12815,22 @@ void QUICK_INDEX_MERGE_SELECT::dbug_dump(int indent, bool verbose)
   fprintf(DBUG_FILE, "%*s}\n", indent, "");
 }

+void QUICK_INDEX_INTERSECT_SELECT::dbug_dump(int indent, bool verbose)
+{
+  List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
+  QUICK_RANGE_SELECT *quick;
+  fprintf(DBUG_FILE, "%*squick index_intersect select\n", indent, "");
+  fprintf(DBUG_FILE, "%*smerged scans {\n", indent, "");
+  while ((quick= it++))
+    quick->dbug_dump(indent+2, verbose);
+  if (pk_quick_select)
+  {
+    fprintf(DBUG_FILE, "%*sclustered PK quick:\n", indent, "");
+    pk_quick_select->dbug_dump(indent+2, verbose);
+  }
+  fprintf(DBUG_FILE, "%*s}\n", indent, "");
+}
+
 void QUICK_ROR_INTERSECT_SELECT::dbug_dump(int indent, bool verbose)
 {
   List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
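get_best_index_intersect() prices the plan as the sum of the index-only read costs of the candidate scans, plus the Unique sort/de-duplication cost over all collected rowids, plus the cost of sweeping roughly table_records multiplied by the product of the per-scan selectivities full rows; the plan is kept only if that total beats the best read time found so far. A rough standalone illustration of the arithmetic, with made-up stand-ins for the real cost functions (the per-rowid constants below are placeholders, not MariaDB's values):

    // Rough sketch of the cost shape in get_best_index_intersect().
    #include <cstdio>

    int main()
    {
      double table_records= 1e6;
      double scan_records[]= {20000, 50000};    // rows matched by each range scan
      double read_cost[]=    {55.0, 130.0};     // index-only cost of each scan

      double selectivity= 1.0, total_rowids= 0, cost= 0;
      for (int i= 0; i < 2; i++)
      {
        cost+= read_cost[i];                    // read rowids from each index
        total_rowids+= scan_records[i];
        selectivity*= scan_records[i] / table_records;
      }
      double unique_cost= total_rowids * 1e-3;  // placeholder for Unique::get_use_cost()
      double sweep_cost= table_records * selectivity * 1.0; // placeholder sweep cost/row
      cost+= unique_cost + sweep_cost;
      printf("estimated rows: %.0f, cost: %.1f\n", table_records * selectivity, cost);
    }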
sql/opt_range.h
@@ -274,12 +274,13 @@ public:
   enum {
     QS_TYPE_RANGE = 0,
-    QS_TYPE_INDEX_MERGE = 1,
-    QS_TYPE_RANGE_DESC = 2,
-    QS_TYPE_FULLTEXT   = 3,
-    QS_TYPE_ROR_INTERSECT = 4,
-    QS_TYPE_ROR_UNION = 5,
-    QS_TYPE_GROUP_MIN_MAX = 6
+    QS_TYPE_INDEX_INTERSECT = 1,
+    QS_TYPE_INDEX_MERGE = 2,
+    QS_TYPE_RANGE_DESC = 3,
+    QS_TYPE_FULLTEXT   = 4,
+    QS_TYPE_ROR_INTERSECT = 5,
+    QS_TYPE_ROR_UNION = 6,
+    QS_TYPE_GROUP_MIN_MAX = 7
   };

   /* Get type of this quick select - one of the QS_TYPE_* values */

@@ -393,8 +394,17 @@ protected:
   friend QUICK_RANGE_SELECT *get_quick_select(PARAM*,uint idx,
                                               SEL_ARG *key_tree,
                                               MEM_ROOT *alloc);
+  friend
+  int read_keys_and_merge_scans(THD *thd, TABLE *head,
+                                List<QUICK_RANGE_SELECT> quick_selects,
+                                QUICK_RANGE_SELECT *pk_quick_select,
+                                READ_RECORD *read_record,
+                                bool intersection,
+                                Unique **unique_ptr);
   friend class QUICK_SELECT_DESC;
   friend class QUICK_INDEX_MERGE_SELECT;
+  friend class QUICK_INDEX_INTERSECT_SELECT;
   friend class QUICK_ROR_INTERSECT_SELECT;
   friend class QUICK_GROUP_MIN_MAX_SELECT;

@@ -545,6 +555,45 @@ public:
   READ_RECORD read_record;
 };

+class QUICK_INDEX_INTERSECT_SELECT : public QUICK_SELECT_I
+{
+  Unique *unique;
+public:
+  QUICK_INDEX_INTERSECT_SELECT(THD *thd, TABLE *table);
+  ~QUICK_INDEX_INTERSECT_SELECT();
+
+  int  init();
+  int  reset(void);
+  int  get_next();
+  bool reverse_sorted() { return false; }
+  bool unique_key_range() { return false; }
+  int get_type() { return QS_TYPE_INDEX_INTERSECT; }
+  void add_keys_and_lengths(String *key_names, String *used_lengths);
+  void add_info_string(String *str);
+  bool is_keys_used(const MY_BITMAP *fields);
+#ifndef DBUG_OFF
+  void dbug_dump(int indent, bool verbose);
+#endif
+
+  bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range);
+
+  /* range quick selects this index_merge read consists of */
+  List<QUICK_RANGE_SELECT> quick_selects;
+
+  /* quick select that uses clustered primary key (NULL if none) */
+  QUICK_RANGE_SELECT* pk_quick_select;
+
+  /* true if this select is currently doing a clustered PK scan */
+  bool  doing_pk_scan;
+
+  MEM_ROOT alloc;
+  THD *thd;
+  int read_keys_and_merge();
+
+  /* used to get rows collected in Unique */
+  READ_RECORD read_record;
+};
+
 /*
   Rowid-Ordered Retrieval (ROR) index intersection quick select.
sql/sql_class.h
@@ -2949,6 +2949,7 @@ class user_var_entry
   DTCollation collation;
 };
+
 /*
   Unique -- class for unique (removing of duplicates).
   Puts all values to the TREE. If the tree becomes too big,

@@ -2967,11 +2968,14 @@ class Unique :public Sql_alloc
   uchar *record_pointers;
   bool flush();
   uint size;
+  uint full_size;
+  uint min_dupl_count;

 public:
   ulong elements;
   Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
-         uint size_arg, ulonglong max_in_memory_size_arg);
+         uint size_arg, ulonglong max_in_memory_size_arg,
+         uint min_dupl_count_arg= 0);
   ~Unique();
   ulong elements_in_tree() { return tree.elements_in_tree; }
   inline bool unique_add(void *ptr)

@@ -2983,6 +2987,9 @@ public:
     DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg));
   }

+  bool is_in_memory() { return (my_b_tell(&file) == 0); }
+  void close_for_expansion() { tree.flag= TREE_ONLY_DUPS; }
+
   bool get(TABLE *table);
   static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
                              ulonglong max_in_memory_size);

@@ -3002,6 +3009,11 @@ public:
   friend int unique_write_to_file(uchar* key, element_count count, Unique *unique);
   friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique);
+
+  friend int unique_write_to_file_with_count(uchar* key, element_count count,
+                                             Unique *unique);
+  friend int unique_intersect_write_to_ptrs(uchar* key, element_count count,
+                                            Unique *unique);
 };
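With a non-zero min_dupl_count_arg the Unique object stores an element_count per key (full_size grows by sizeof(element_count)) and its flush()/get() paths switch to the *_with_count and intersect callbacks defined in uniques.cc below. The call site added in opt_range.cc, slightly condensed from the read_keys_and_merge_scans() hunk above, shows how the intersect reader asks for this: the number of merged range scans is passed as the threshold, so only rowids returned by every scan survive Unique::get().

    // Condensed from the diff above (read_keys_and_merge_scans in opt_range.cc):
    unique= new Unique(refpos_order_cmp, (void *)file,
                       file->ref_length,
                       thd->variables.sortbuff_size,
                       intersection ? quick_selects.elements : 0);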
sql/sql_select.cc
@@ -13636,7 +13636,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
       by clustered PK values.
     */
     if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE ||
+        quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT ||
         quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
         quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT)
       DBUG_RETURN(0);

@@ -14042,6 +14043,7 @@ check_reverse_order:
       QUICK_SELECT_DESC *tmp;
       int quick_type= select->quick->get_type();
       if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE ||
+          quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT ||
           quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
           quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
           quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)

@@ -16810,6 +16812,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
       {
        quick_type= tab->select->quick->get_type();
        if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) ||
+           (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) ||
            (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) ||
            (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION))
          tab->type= JT_INDEX_MERGE;

@@ -17015,6 +17018,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
       {
        if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
            quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
+           quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT ||
            quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE)
        {
          extra.append(STRING_WITH_LEN("; Using "));
sql/sql_sort.h
@@ -57,6 +57,7 @@ typedef struct st_sort_param {
   uint addon_length;  /* Length of added packed fields */
   uint res_length;    /* Length of records in final sorted file/buffer */
   uint keys;          /* Max keys / buffer */
+  element_count min_dupl_count;
   ha_rows max_rows,examined_rows;
   TABLE *sort_form;   /* For quicker make_sortkey */
   SORT_FIELD *local_sortorder;

@@ -80,4 +81,9 @@ int merge_buffers(SORTPARAM *param,IO_CACHE *from_file,
                   IO_CACHE *to_file, uchar *sort_buffer,
                   BUFFPEK *lastbuff,BUFFPEK *Fb,
                   BUFFPEK *Tb,int flag);
+int merge_index(SORTPARAM *param, uchar *sort_buffer,
+                BUFFPEK *buffpek, uint maxbuffer,
+                IO_CACHE *tempfile, IO_CACHE *outfile);
+
 void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length);
sql/uniques.cc
@@ -33,7 +33,6 @@
 #include "mysql_priv.h"
 #include "sql_sort.h"
-
 int unique_write_to_file(uchar* key, element_count count, Unique *unique)
 {
   /*

@@ -45,6 +44,12 @@ int unique_write_to_file(uchar* key, element_count count, Unique *unique)
   return my_b_write(&unique->file, key, unique->size) ? 1 : 0;
 }

+int unique_write_to_file_with_count(uchar* key, element_count count,
+                                    Unique *unique)
+{
+  return my_b_write(&unique->file, key, unique->size) ||
+         my_b_write(&unique->file, &count, sizeof(element_count)) ? 1 : 0;
+}
+
 int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique)
 {
   memcpy(unique->record_pointers, key, unique->size);

@@ -52,10 +57,26 @@ int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique)
   return 0;
 }

+int unique_intersect_write_to_ptrs(uchar* key, element_count count,
+                                   Unique *unique)
+{
+  if (count >= unique->min_dupl_count)
+  {
+    memcpy(unique->record_pointers, key, unique->size);
+    unique->record_pointers += unique->size;
+  }
+  return 0;
+}
+
 Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
-               uint size_arg, ulonglong max_in_memory_size_arg)
+               uint size_arg, ulonglong max_in_memory_size_arg,
+               uint min_dupl_count_arg)
   :max_in_memory_size(max_in_memory_size_arg), size(size_arg), elements(0)
 {
+  min_dupl_count= min_dupl_count_arg;
+  full_size= size;
+  if (min_dupl_count_arg)
+    full_size+= sizeof(element_count);
   my_b_clear(&file);
   init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, 0,
             NULL, comp_func_fixed_arg);

@@ -276,7 +297,11 @@ double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
   result= 2*log2_n_fact(last_tree_elems + 1.0);
   if (n_full_trees)
     result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0);
+#if 1
   result /= TIME_FOR_COMPARE_ROWID;
+#else
+  result /= TIME_FOR_COMPARE_ROWID * 10;
+#endif

   DBUG_PRINT("info",("unique trees sizes: %u=%u*%lu + %lu", nkeys,
                      n_full_trees, n_full_trees?max_elements_in_tree:0,

@@ -327,7 +352,10 @@ bool Unique::flush()
   file_ptr.count=tree.elements_in_tree;
   file_ptr.file_pos=my_b_tell(&file);

-  if (tree_walk(&tree, (tree_walk_action) unique_write_to_file,
+  tree_walk_action action= min_dupl_count ?
+                           (tree_walk_action) unique_write_to_file_with_count :
+                           (tree_walk_action) unique_write_to_file;
+  if (tree_walk(&tree, action,
                 (void*) this, left_root_right) ||
       insert_dynamic(&file_ptrs, (uchar*) &file_ptr))
     return 1;

@@ -357,6 +385,7 @@ Unique::reset()
     reinit_io_cache(&file, WRITE_CACHE, 0L, 0, 1);
   }
   elements= 0;
+  tree.flag= 0;
 }

 /*

@@ -576,14 +605,16 @@ bool Unique::get(TABLE *table)
 {
   SORTPARAM sort_param;
   table->sort.found_records=elements+tree.elements_in_tree;

   if (my_b_tell(&file) == 0)
   {
     /* Whole tree is in memory;  Don't use disk if you don't need to */
     if ((record_pointers=table->sort.record_pointers= (uchar*)
         my_malloc(size * tree.elements_in_tree, MYF(0))))
     {
-      (void) tree_walk(&tree, (tree_walk_action) unique_write_to_ptrs,
+      tree_walk_action action= min_dupl_count ?
+                         (tree_walk_action) unique_intersect_write_to_ptrs :
+                         (tree_walk_action) unique_write_to_ptrs;
+      (void) tree_walk(&tree, action,
                        this, left_root_right);
       return 0;
     }

@@ -614,7 +645,10 @@ bool Unique::get(TABLE *table)
   sort_param.max_rows= elements;
   sort_param.sort_form=table;
   sort_param.rec_length= sort_param.sort_length= sort_param.ref_length=
-    size;
+    full_size;
+  sort_param.min_dupl_count= min_dupl_count;
+  sort_param.res_length= 0;
   sort_param.keys= (uint) (max_in_memory_size / sort_param.sort_length);
   sort_param.not_killable=1;

@@ -635,8 +669,9 @@ bool Unique::get(TABLE *table)
   if (flush_io_cache(&file) ||
       reinit_io_cache(&file,READ_CACHE,0L,0,0))
     goto err;
-  if (merge_buffers(&sort_param, &file, outfile, sort_buffer, file_ptr,
-                    file_ptr, file_ptr+maxbuffer,0))
+  sort_param.res_length= sort_param.rec_length-
+                         (min_dupl_count ? sizeof(min_dupl_count) : 0);
+  if (merge_index(&sort_param, sort_buffer, file_ptr, maxbuffer, &file, outfile))
     goto err;
   error=0;
 err:
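In the in-memory case Unique::get() now walks the tree with unique_intersect_write_to_ptrs(), emitting a rowid only when its duplicate count has reached min_dupl_count; in the spill-to-disk case unique_write_to_file_with_count() appends the count to every flushed element and merge_buffers()/merge_index() apply the same threshold while merging. A standalone sketch of that final walk-and-filter (illustrative names, not the MariaDB API):

    // Standalone sketch of the tree-walk filtering idea in uniques.cc: during
    // the final walk an element is emitted only if it was seen at least
    // min_dupl_count times.
    #include <cstdint>
    #include <functional>
    #include <map>
    #include <vector>

    void walk_and_emit(const std::map<uint64_t, unsigned> &tree,
                       unsigned min_dupl_count,
                       const std::function<void(uint64_t)> &emit)
    {
      for (const auto &elem : tree)              // left-to-right walk, keys in order
        if (elem.second >= min_dupl_count)       // unique_intersect_write_to_ptrs check
          emit(elem.first);
    }

    int main()
    {
      std::map<uint64_t, unsigned> tree= { {3, 1}, {8, 3}, {11, 2}, {15, 3} };
      std::vector<uint64_t> out;
      walk_and_emit(tree, 3, [&](uint64_t rowid) { out.push_back(rowid); }); // keeps 8, 15
    }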