Commit f19fb870 authored by unknown

WL#2985 "Partition Pruning"


sql/ha_ndbcluster.cc:
  WL#2985 "Partition Pruning": added part_info->used_partitions initialization
sql/ha_partition.cc:
  WL#2985 "Partition Pruning": added part_info->used_partitions initialization
sql/handler.h:
  WL#2985 "Partition Pruning": 
  Added function prototypes
  in partition_info:
   - Added 'used_partitions' bitmap
   - Added comments
sql/item.h:
  WL#2985 "Partition Pruning": 
  - added enum monotonicity_info
  - added virtual Item::get_monotonicity_info()
sql/item_timefunc.cc:
  WL#2985 "Partition Pruning": 
  - added Item_func_to_days::get_monotonicity_info()
  - added Item_func_year::get_monotonicity_info()
sql/item_timefunc.h:
  WL#2985 "Partition Pruning": 
  - added Item_func_to_days::get_monotonicity_info()
  - added Item_func_year::get_monotonicity_info()
sql/opt_range.cc:
  WL#2985 "Partition Pruning":
  - Split out PARAM structure into PARAM and RANGE_OPT_PARAM part.
  - Added partition pruning module code.
sql/opt_range.h:
  WL#2985 "Partition Pruning": 
  Added the prune_partitions() function declaration. This is the entry point for
  the partition pruning module.
sql/sql_class.cc:
  WL#2985 "Partition Pruning": added support for "EXPLAIN PARTITIONS SELECT ..."
sql/sql_lex.h:
  WL#2985 "Partition Pruning": added support for "EXPLAIN PARTITIONS SELECT ..."
sql/sql_partition.cc:
  WL#2985 "Partition Pruning": 
   - Added get_list_array_idx_for_endpoint and get_range_... functions to support partition 
     pruning on "partition_field < const"-like intervals.
   - Added partition_info::used_partitions bitmap.
   - Added make_used_partitions_str function
   - Fixed BUG#15819
sql/sql_select.cc:
  WL#2985 "Partition Pruning": 
  - Added prune_partitions() invocation right before the range analysis
  - Added code to handle return value from prune_partitions()
  - Added support for "EXPLAIN PARTITIONS SELECT ..."
sql/sql_yacc.yy:
  WL#2985 "Partition Pruning": added support for "EXPLAIN PARTITIONS SELECT ..."
parent cdfd9f7f
#
# Partition pruning tests. Currently we only detect which partitions to
# prune, so the tests only check EXPLAIN output.
#
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
--enable_warnings
# Check if we can infer from condition on partition fields that
# no records will match.
create table t1 ( a int not null) partition by hash(a) partitions 2;
insert into t1 values (1),(2),(3);
explain select * from t1 where a=5 and a=6;
drop table t1;
# Simple HASH partitioning
create table t1 (
a int(11) not null
) partition by hash (a) partitions 2;
insert into t1 values (1),(2),(3);
explain partitions select * from t1 where a=1;
explain partitions select * from t1 where a=2;
explain partitions select * from t1 where a=1 or a=2;
# Partitioning over several fields
create table t2 (
a int not null,
b int not null
) partition by key(a,b) partitions 2;
insert into t2 values (1,1),(2,2),(3,3);
explain partitions select * from t2 where a=1;
explain partitions select * from t2 where b=1;
explain partitions select * from t2 where a=1 and b=1;
# RANGE(expr) partitioning
create table t3 (
a int
)
partition by range (a*1) (
partition p0 values less than (10),
partition p1 values less than (20)
);
insert into t3 values (5),(15);
explain partitions select * from t3 where a=11;
explain partitions select * from t3 where a=10;
explain partitions select * from t3 where a=20;
explain partitions select * from t3 where a=30;
# LIST(expr) partitioning
create table t4 (a int not null, b int not null) partition by LIST (a+b) (
partition p0 values in (12),
partition p1 values in (14)
);
insert into t4 values (10,2), (10,4);
# empty OR one
explain partitions select * from t4 where (a=10 and b=1) or (a=10 and b=2);
# empty OR one OR empty
explain partitions select * from t4
where (a=10 and b=1) or (a=10 and b=2) or (a=10 and b = 3);
# one OR empty OR one
explain partitions select * from t4 where (a=10 and b=2) or (a=10 and b=3)
or (a=10 and b = 4);
# empty OR full
explain partitions select * from t4 where (a=10 and b=1) or a=11;
# one OR full
explain partitions select * from t4 where (a=10 and b=2) or a=11;
drop table t1, t2, t3, t4;
# LIST(expr)/HASH subpartitioning.
create table t5 (a int not null, b int not null,
c int not null, d int not null)
partition by LIST(a+b) subpartition by HASH (c+d) subpartitions 2
(
partition p0 values in (12),
partition p1 values in (14)
);
insert into t5 values (10,2,0,0), (10,4,0,0), (10,2,0,1), (10,4,0,1);
explain partitions select * from t5;
# empty OR one OR empty
explain partitions select * from t5
where (a=10 and b=1) or (a=10 and b=2) or (a=10 and b = 3);
# one OR empty OR one
explain partitions select * from t5 where (a=10 and b=2) or (a=10 and b=3)
or (a=10 and b = 4);
# conditions on subpartitions only
explain partitions select * from t5 where (c=1 and d=1);
explain partitions select * from t5 where (c=2 and d=1);
# mixed partition/subpartitions.
explain partitions select * from t5 where (a=10 and b=2 and c=1 and d=1) or
(c=2 and d=1);
# same as above
explain partitions select * from t5 where (a=10 and b=2 and c=1 and d=1) or
(b=2 and c=2 and d=1);
# LIST(field) partitioning, interval analysis.
create table t6 (a int not null) partition by LIST(a) (
partition p1 values in (1),
partition p3 values in (3),
partition p5 values in (5),
partition p7 values in (7),
partition p9 values in (9)
);
insert into t6 values (1),(3),(5);
explain partitions select * from t6 where a < 1;
explain partitions select * from t6 where a <= 1;
explain partitions select * from t6 where a > 9;
explain partitions select * from t6 where a >= 9;
explain partitions select * from t6 where a > 0 and a < 5;
explain partitions select * from t6 where a > 5 and a < 12;
explain partitions select * from t6 where a > 3 and a < 8 ;
explain partitions select * from t6 where a >= 0 and a <= 5;
explain partitions select * from t6 where a >= 5 and a <= 12;
explain partitions select * from t6 where a >= 3 and a <= 8;
explain partitions select * from t6 where a > 3 and a < 5;
# RANGE(field) partitioning, interval analysis.
create table t7 (a int not null) partition by RANGE(a) (
partition p10 values less than (10),
partition p30 values less than (30),
partition p50 values less than (50),
partition p70 values less than (70),
partition p90 values less than (90)
);
insert into t7 values (10),(30),(50);
# leftmost intervals
explain partitions select * from t7 where a < 5;
explain partitions select * from t7 where a < 10;
explain partitions select * from t7 where a <= 10;
explain partitions select * from t7 where a = 10;
#rightmost intervals
explain partitions select * from t7 where a < 90;
explain partitions select * from t7 where a = 90;
explain partitions select * from t7 where a > 90;
explain partitions select * from t7 where a >= 90;
# misc intervals
explain partitions select * from t7 where a > 11 and a < 29;
# RANGE(monotonic_func) partitioning
create table t8 (a date not null) partition by RANGE(YEAR(a)) (
partition p0 values less than (1980),
partition p1 values less than (1990),
partition p2 values less than (2000)
);
insert into t8 values ('1985-05-05'),('1995-05-05');
explain partitions select * from t8 where a < '1980-02-02';
# RANGE(strict_monotonic_func) partitioning
create table t9 (a date not null) partition by RANGE(TO_DAYS(a)) (
partition p0 values less than (732299), -- 2004-12-19
partition p1 values less than (732468), -- 2005-06-06
partition p2 values less than (732664) -- 2005-12-19
);
insert into t9 values ('2005-05-05'), ('2005-04-04');
explain partitions select * from t9 where a < '2004-12-19';
explain partitions select * from t9 where a <= '2004-12-19';
drop table t5,t6,t7,t8,t9;
# Test the case where we can't create partitioning 'index'
create table t1 (a enum('a','b','c','d') default 'a')
partition by hash (ascii(a)) partitions 2;
insert into t1 values ('a'),('b'),('c');
explain partitions select * from t1 where a='b';
drop table t1;
@@ -3123,7 +3123,6 @@ void ha_ndbcluster::info(uint flag)
  DBUG_VOID_RETURN;
}

int ha_ndbcluster::extra(enum ha_extra_function operation)
{
  DBUG_ENTER("extra");
@@ -3132,6 +3131,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
    DBUG_PRINT("info", ("HA_EXTRA_RESET"));
    DBUG_PRINT("info", ("Clearing condition stack"));
    cond_clear();
    if (m_part_info)
      bitmap_clear_all(&m_part_info->used_partitions);
    break;
  case HA_EXTRA_IGNORE_DUP_KEY:       /* Dup keys don't rollback everything*/
    DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
......
@@ -2795,6 +2795,8 @@ int ha_partition::reset(void)
  handler **file;
  DBUG_ENTER("ha_partition::reset");
  file= m_file;
  if (m_part_info)
    bitmap_clear_all(&m_part_info->used_partitions);
  do
  {
    if ((tmp= (*file)->reset()))
......
@@ -534,19 +534,52 @@ class partition_info :public Sql_alloc {
  List<char> part_field_list;
  List<char> subpart_field_list;
  /*
    If there is no subpartitioning, use only this func to get partition ids.
    If there is subpartitioning, use this func to get partition id when
    you have both partition and subpartition fields.
  */
  get_part_id_func get_partition_id;

  /* Get partition id when we don't have subpartition fields */
  get_part_id_func get_part_partition_id;

  /*
    Get subpartition id when we don't have partition fields but we do
    have subpartition fields.
    Mikael said that for a given constant tuple
    {subpart_field1, ..., subpart_fieldN} the subpartition id will be the
    same in all partitions.
  */
  get_subpart_id_func get_subpartition_id;

  /* NULL-terminated list of fields used in the partitioning expression */
  Field **part_field_array;
  /* NULL-terminated list of fields used in the subpartitioning expression */
  Field **subpart_field_array;
  /*
    Array of all fields used in the partition and subpartition expressions,
    without duplicates, NULL-terminated.
  */
  Field **full_part_field_array;

  Item *part_expr;
  Item *subpart_expr;
  Item *item_free_list;

  /*
    Bitmap of partitions used by the current query.
    Usage pattern:
    * It is guaranteed that all partitions are set to be unused on query start.
    * Before index/rnd_init(), the partition pruning code sets the bits for
      the used partitions.
    * The handler->extra(HA_EXTRA_RESET) call at query end sets all partitions
      back to unused.
  */
  MY_BITMAP used_partitions;

  union {
    longlong *range_int_array;
@@ -747,6 +780,13 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf,
bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
                            uint part_info_len, TABLE *table,
                            enum db_type default_db_type);
void make_used_partitions_str(partition_info *part_info, String *parts_str);
uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
                                       bool left_endpoint,
                                       bool include_endpoint);
uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
                                           bool left_endpoint,
                                           bool include_endpoint);
#endif
......
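The used_partitions usage pattern documented above can be sketched end to end. The snippet below is illustrative only, not server code: the function name is invented, and it assumes compilation inside the MySQL source tree so that the MY_BITMAP helpers from my_bitmap.h (bitmap_clear_all, bitmap_set_bit, bitmap_is_set) used by the handler changes above are available.

#include "my_global.h"
#include "my_bitmap.h"

/* Hypothetical illustration of one query's used_partitions lifecycle. */
void example_query_lifecycle(MY_BITMAP *used_partitions, uint no_parts)
{
  /* Query start: no partition is marked as used. */
  bitmap_clear_all(used_partitions);

  /*
    Before index/rnd_init(): partition pruning (prune_partitions() in
    opt_range.cc) sets the bits of the partitions the WHERE clause can
    possibly touch, e.g. only the first one here.
  */
  bitmap_set_bit(used_partitions, 0);

  /* Execution: the handler only scans partitions whose bit is set. */
  for (uint i= 0; i < no_parts; i++)
  {
    if (bitmap_is_set(used_partitions, i))
    {
      /* ... read rows from partition i ... */
    }
  }

  /*
    Query end: handler::extra(HA_EXTRA_RESET) clears the bitmap again,
    as ha_ndbcluster::extra() and ha_partition::reset() do above.
  */
  bitmap_clear_all(used_partitions);
}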
@@ -368,6 +368,28 @@ class Name_resolution_context_state
  }
};

/*
  This enum is used to report information about monotonicity of the function
  represented by an Item* tree.
  Monotonicity is defined only for Item* trees that represent table
  partitioning expressions (i.e. have no subselects/user vars/PS parameters
  etc). An Item* tree is assumed to have the same monotonicity properties
  as its corresponding function F:

    [signed] longlong F(field1, field2, ...)
    {
      put values of field_i into table record buffer;
      return item->val_int();
    }
*/
typedef enum monotonicity_info
{
  NON_MONOTONIC,              /* none of the below holds */
  MONOTONIC_INCREASING,       /* F() is unary and "x < y" => "F(x) <= F(y)" */
  MONOTONIC_STRICT_INCREASING /* F() is unary and "x < y" => "F(x) < F(y)" */
} enum_monotonicity_info;

/*************************************************************************/

typedef bool (Item::*Item_processor)(byte *arg);
@@ -465,6 +487,15 @@ class Item {
  virtual Item_result cast_to_int_type() const { return result_type(); }
  virtual enum_field_types field_type() const;
  virtual enum Type type() const =0;

  /*
    Return information about function monotonicity. See comment for
    enum_monotonicity_info for details. This function can only be called
    after fix_fields() call.
  */
  virtual enum_monotonicity_info get_monotonicity_info() const
  { return NON_MONOTONIC; }

  /* valXXX methods must return NULL or 0 or 0.0 if null_value is set. */
  /*
    Return double precision floating point representation of item.
@@ -1138,6 +1169,10 @@ class Item_field :public Item_ident
  {
    return field->type();
  }
  enum_monotonicity_info get_monotonicity_info() const
  {
    return MONOTONIC_STRICT_INCREASING;
  }
  Field *get_tmp_table_field() { return result_field; }
  Field *tmp_table_field(TABLE *t_arg) { return result_field; }
  bool get_date(TIME *ltime,uint fuzzydate);
......
@@ -885,6 +885,19 @@ longlong Item_func_to_days::val_int()
  return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
}

enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const
{
  if (args[0]->type() == Item::FIELD_ITEM)
  {
    if (args[0]->field_type() == MYSQL_TYPE_DATE)
      return MONOTONIC_STRICT_INCREASING;
    if (args[0]->field_type() == MYSQL_TYPE_DATETIME)
      return MONOTONIC_INCREASING;
  }
  return NON_MONOTONIC;
}

longlong Item_func_dayofyear::val_int()
{
  DBUG_ASSERT(fixed == 1);
@@ -1067,6 +1080,14 @@ longlong Item_func_year::val_int()
  return (longlong) ltime.year;
}

enum_monotonicity_info Item_func_year::get_monotonicity_info() const
{
  if (args[0]->type() == Item::FIELD_ITEM &&
      (args[0]->field_type() == MYSQL_TYPE_DATE ||
       args[0]->field_type() == MYSQL_TYPE_DATETIME))
    return MONOTONIC_INCREASING;
  return NON_MONOTONIC;
}

longlong Item_func_unix_timestamp::val_int()
{
......
@@ -65,6 +65,7 @@ class Item_func_to_days :public Item_int_func
    max_length=6*MY_CHARSET_BIN_MB_MAXLEN;
    maybe_null=1;
  }
  enum_monotonicity_info get_monotonicity_info() const;
};
@@ -234,6 +235,7 @@ class Item_func_year :public Item_int_func
  Item_func_year(Item *a) :Item_int_func(a) {}
  longlong val_int();
  const char *func_name() const { return "year"; }
  enum_monotonicity_info get_monotonicity_info() const;
  void fix_length_and_dec()
  {
    decimals=0;
......
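The two different return values in Item_func_to_days::get_monotonicity_info() above reflect a real distinction: TO_DAYS over a DATE column is strictly increasing, while over a DATETIME column two distinct values within the same day collapse to one day number, so only the non-strict MONOTONIC_INCREASING guarantee holds and interval endpoints must be handled conservatively (the t9 tests with a < '2004-12-19' versus a <= '2004-12-19' probe exactly this). A self-contained toy illustration, with an invented struct and a simplified day-number function rather than the server's calc_daynr:

#include <cstdio>

/* Invented stand-in for a DATETIME value; illustration only. */
struct Datetime { int year, month, day, hour; };

/* Simplified day number that, like TO_DAYS, depends on the date part only. */
static long toy_to_days(const Datetime &v)
{
  return (long) v.year * 372 + v.month * 31 + v.day;
}

int main()
{
  Datetime a= {2005, 5, 5, 10};    /* 2005-05-05 10:00 */
  Datetime b= {2005, 5, 5, 20};    /* 2005-05-05 20:00: a later, distinct value */

  /*
    a < b as DATETIME values, yet toy_to_days(a) == toy_to_days(b):
    "x < y" only guarantees "F(x) <= F(y)", i.e. MONOTONIC_INCREASING,
    not MONOTONIC_STRICT_INCREASING.  For a DATE column there is no time
    part, so the mapping is strictly increasing.
  */
  std::printf("%ld %ld\n", toy_to_days(a), toy_to_days(b));
  return 0;
}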
@@ -249,6 +249,7 @@ class QUICK_SELECT_I

struct st_qsel_param;
class PARAM;
class SEL_ARG;

/*
@@ -283,12 +284,12 @@ class QUICK_RANGE_SELECT : public QUICK_SELECT_I
  QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
                                               struct st_table_ref *ref,
                                               ha_rows records);
  friend bool get_quick_keys(PARAM *param,
                             QUICK_RANGE_SELECT *quick,KEY_PART *key,
                             SEL_ARG *key_tree,
                             char *min_key, uint min_key_flag,
                             char *max_key, uint max_key_flag);
  friend QUICK_RANGE_SELECT *get_quick_select(PARAM*,uint idx,
                                              SEL_ARG *key_tree,
                                              MEM_ROOT *alloc);
  friend class QUICK_SELECT_DESC;
@@ -718,4 +719,8 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
                                             ha_rows records);
uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit);

#ifdef WITH_PARTITION_STORAGE_ENGINE
bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
#endif

#endif
@@ -745,6 +745,13 @@ int THD::send_explain_fields(select_result *result)
  field_list.push_back(new Item_empty_string("select_type", 19, cs));
  field_list.push_back(item= new Item_empty_string("table", NAME_LEN, cs));
  item->maybe_null= 1;
#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (lex->describe & DESCRIBE_PARTITIONS)
  {
    field_list.push_back(item= new Item_empty_string("partitions", 10, cs));
    item->maybe_null= 1;
  }
#endif
  field_list.push_back(item= new Item_empty_string("type", 10, cs));
  item->maybe_null= 1;
  field_list.push_back(item=new Item_empty_string("possible_keys",
......
@@ -102,6 +102,11 @@ enum enum_sql_command {

// describe/explain types
#define DESCRIBE_NORMAL     1
#define DESCRIBE_EXTENDED   2
/*
  This is not #ifdef'ed because we want "EXPLAIN PARTITIONS ..." to produce
  an additional "partitions" column even if partitioning is not compiled in.
*/
#define DESCRIBE_PARTITIONS 4

enum enum_sp_suid_behaviour
{
......
@@ -2477,16 +2477,94 @@ bool get_partition_id_list(partition_info *part_info,
    if (list_value < part_func_value)
      min_list_index= list_index + 1;
    else if (list_value > part_func_value)
    {
      if (!list_index)
        goto notfound;
      max_list_index= list_index - 1;
    }
    else
    {
      *part_id= (uint32)list_array[list_index].partition_id;
      DBUG_RETURN(FALSE);
    }
  }
notfound:
  *part_id= 0;
  DBUG_RETURN(TRUE);
}
/*
Find the part of part_info->list_array that corresponds to given interval
SYNOPSIS
get_list_array_idx_for_endpoint()
part_info Partitioning info (partitioning type must be LIST)
left_endpoint TRUE - the interval is [a; +inf) or (a; +inf)
FALSE - the interval is (-inf; a] or (-inf; a)
include_endpoint TRUE iff the interval includes the endpoint
DESCRIPTION
This function finds the part of part_info->list_array where values of
list_array[idx].list_value are contained within the specified interval.
list_array is ordered by list_value, so
1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the
sought array part starts at some index idx and continues till array
end.
The function returns first number idx, such that
list_array[idx].list_value is contained within the passed interval.
2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
sought array part starts at array start and continues till some last
index idx.
The function returns first number idx, such that
list_array[idx].list_value is NOT contained within the passed interval.
If all array elements are contained, part_info->no_list_values is
returned.
NOTE
The caller will call this function and then will run along the part of
list_array to collect partition ids. If the number of list values is
significantly higher than the number of partitions, this could be slow and
we could invent some other approach. The "run over list array" part is
already wrapped in a get_next()-like function.
RETURN
The edge of corresponding part_info->list_array part.
*/
uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
                                       bool left_endpoint,
                                       bool include_endpoint)
{
  DBUG_ENTER("get_list_array_idx_for_endpoint");
  LIST_PART_ENTRY *list_array= part_info->list_array;
  uint list_index;
  longlong list_value;
  uint min_list_index= 0, max_list_index= part_info->no_list_values - 1;
  longlong part_func_value= part_info->part_expr->val_int();
  while (max_list_index >= min_list_index)
  {
    list_index= (max_list_index + min_list_index) >> 1;
    list_value= list_array[list_index].list_value;
    if (list_value < part_func_value)
      min_list_index= list_index + 1;
    else if (list_value > part_func_value)
    {
      if (!list_index)
        goto notfound;
      max_list_index= list_index - 1;
    }
    else
    {
      DBUG_RETURN(list_index + test(left_endpoint ^ include_endpoint));
    }
  }
notfound:
  if (list_value < part_func_value)
    list_index++;
  DBUG_RETURN(list_index);
}
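To make the contract above concrete, here is a standalone toy version of the same endpoint search, run against the list values of table t6 from the test file (1,3,5,7,9). The function name and types are invented for the example; it mirrors the binary search and the left_endpoint/include_endpoint adjustment rather than the server code itself:

#include <cstdio>

/*
  Toy endpoint search: values[] plays the role of part_info->list_array,
  ordered ascending.  For left_endpoint==true the interval is [a;+inf) or
  (a;+inf) and the function returns the first index whose value lies inside
  it; for left_endpoint==false the interval is (-inf;a] or (-inf;a) and it
  returns the first index whose value lies outside it (n if none does).
*/
static unsigned list_idx_for_endpoint(const long *values, unsigned n, long a,
                                      bool left_endpoint, bool include_endpoint)
{
  unsigned lo= 0, hi= n;                  /* half-open search range [lo, hi) */
  while (lo < hi)
  {
    unsigned mid= (lo + hi) / 2;
    if (values[mid] < a)
      lo= mid + 1;
    else if (values[mid] > a)
      hi= mid;
    else                                  /* exact hit on the endpoint value */
      return mid + ((left_endpoint ^ include_endpoint) ? 1 : 0);
  }
  return lo;                              /* 'a' itself is not a list value */
}

int main()
{
  const long vals[]= {1, 3, 5, 7, 9};     /* list values of table t6 above */

  /* WHERE a > 3  -> interval (3;+inf): first value inside is 5, index 2  */
  std::printf("%u\n", list_idx_for_endpoint(vals, 5, 3, true, false));
  /* WHERE a <= 5 -> interval (-inf;5]: first value outside is 7, index 3 */
  std::printf("%u\n", list_idx_for_endpoint(vals, 5, 5, false, true));
  return 0;
}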
bool get_partition_id_range(partition_info *part_info, bool get_partition_id_range(partition_info *part_info,
                            uint32 *part_id)
@@ -2516,6 +2594,89 @@ bool get_partition_id_range(partition_info *part_info,
  DBUG_RETURN(FALSE);
}
/*
Find the part of part_info->range_int_array that covers the given interval
SYNOPSIS
get_partition_id_range_for_endpoint()
part_info Partitioning info (partitioning type must be RANGE)
left_endpoint TRUE - the interval is [a; +inf) or (a; +inf)
FALSE - the interval is (-inf; a] or (-inf; a).
include_endpoint TRUE <=> the endpoint itself is included in the
interval
DESCRIPTION
This function finds the part of part_info->range_int_array where the
elements have non-empty intersections with the given interval.
A range_int_array element at index idx represents the interval
[range_int_array[idx-1], range_int_array[idx]),
intervals are disjoint and ordered by their right bound, so
1. For [a; +inf) or (a; +inf)-type intervals (left_endpoint==TRUE), the
sought array part starts at some index idx and continues till array
end.
The function returns first number idx, such that the interval
represented by range_int_array[idx] has non empty intersection with
the passed interval.
2. For (-inf; a] or (-inf; a)-type intervals (left_endpoint==FALSE), the
sought array part starts at array start and continues till some last
index idx.
The function returns first number idx, such that the interval
represented by range_int_array[idx] has EMPTY intersection with the
passed interval.
If the interval represented by the last array element has non-empty
intersection with the passed interval, part_info->no_parts is
returned.
RETURN
The edge of corresponding part_info->range_int_array part.
*/
uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
                                           bool left_endpoint,
                                           bool include_endpoint)
{
  DBUG_ENTER("get_partition_id_range_for_endpoint");
  longlong *range_array= part_info->range_int_array;
  uint max_partition= part_info->no_parts - 1;
  uint min_part_id= 0, max_part_id= max_partition, loc_part_id;
  longlong part_func_value= part_info->part_expr->val_int();
  while (max_part_id > min_part_id)
  {
    loc_part_id= (max_part_id + min_part_id + 1) >> 1;
    if (range_array[loc_part_id] < part_func_value)
      min_part_id= loc_part_id + 1;
    else
      max_part_id= loc_part_id - 1;
  }
  loc_part_id= max_part_id;
  if (loc_part_id < max_partition &&
      part_func_value >= range_array[loc_part_id+1])
  {
    loc_part_id++;
  }
  if (left_endpoint)
  {
    if (part_func_value >= range_array[loc_part_id])
      loc_part_id++;
  }
  else
  {
    if (part_func_value == range_array[loc_part_id])
      loc_part_id += test(include_endpoint);
    else if (part_func_value > range_array[loc_part_id])
      loc_part_id++;
    loc_part_id++;
  }
  DBUG_RETURN(loc_part_id);
}
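A similar toy illustration for the range case, using the boundaries of table t7 from the test file (10,30,50,70,90). It implements the documented return-value semantics directly rather than reproducing the server's binary search; the name and types are invented for the example:

#include <cstdio>

/*
  Toy range endpoint search: bounds[] plays the role of
  part_info->range_int_array, so partition i holds values in
  [bounds[i-1], bounds[i]) with bounds[-1] taken as -infinity.
*/
static unsigned range_idx_for_endpoint(const long *bounds, unsigned n, long a,
                                       bool left_endpoint, bool include_endpoint)
{
  if (left_endpoint)
  {
    /* [a;+inf) or (a;+inf): first partition whose upper bound exceeds a */
    unsigned i= 0;
    while (i < n && bounds[i] <= a)
      i++;
    return i;
  }
  /*
    (-inf;a] or (-inf;a): first partition whose lower bound already lies
    outside the interval; partition 0 (lower bound -infinity) always
    intersects, so start at 1.  Returns n if every partition intersects.
  */
  unsigned i= 1;
  while (i < n && (include_endpoint ? bounds[i-1] <= a : bounds[i-1] < a))
    i++;
  return i;
}

int main()
{
  const long bounds[]= {10, 30, 50, 70, 90};  /* table t7: p10,p30,p50,p70,p90 */

  /* WHERE a < 30  -> scan partitions [0,2): p10 and p30                   */
  std::printf("%u\n", range_idx_for_endpoint(bounds, 5, 30, false, false));
  /* WHERE a <= 10 -> also [0,2): rows with a = 10 live in p30             */
  std::printf("%u\n", range_idx_for_endpoint(bounds, 5, 10, false, true));
  /* WHERE a > 50  -> scan partitions [3,5): p70 and p90                   */
  std::printf("%u\n", range_idx_for_endpoint(bounds, 5, 50, true, false));
  return 0;
}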
bool get_partition_id_hash_nosub(partition_info *part_info,
                                 uint32 *part_id)
{
@@ -3204,10 +3365,16 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
  */
  uint part_func_len= part_info->part_func_len;
  uint subpart_func_len= part_info->subpart_func_len;
  uint bitmap_bits= part_info->no_subparts ?
                    (part_info->no_subparts * part_info->no_parts) :
                    part_info->no_parts;
  uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
  uint32 *bitmap_buf;
  char *part_func_string, *subpart_func_string= NULL;
  if (!((part_func_string= thd->alloc(part_func_len))) ||
      (subpart_func_len &&
       !((subpart_func_string= thd->alloc(subpart_func_len)))) ||
      !((bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))))
  {
    my_error(ER_OUTOFMEMORY, MYF(0), part_func_len);
    free_items(thd->free_list);
@@ -3220,6 +3387,8 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf,
                 subpart_func_len);
  part_info->part_func_string= part_func_string;
  part_info->subpart_func_string= subpart_func_string;
  bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
}
result= FALSE;
@@ -3293,3 +3462,60 @@ void set_key_field_ptr(KEY *key_info, const byte *new_buf,
  } while (++i < key_parts);
  DBUG_VOID_RETURN;
}
/*
Fill a string with a comma-separated list of used partition names
SYNOPSIS
make_used_partitions_str()
part_info IN Partitioning info
parts_str OUT The string to fill
*/
void make_used_partitions_str(partition_info *part_info, String *parts_str)
{
  parts_str->length(0);
  partition_element *pe;
  uint partition_id= 0;
  List_iterator<partition_element> it(part_info->partitions);

  if (part_info->subpart_type != NOT_A_PARTITION)
  {
    partition_element *head_pe;
    while ((head_pe= it++))
    {
      List_iterator<partition_element> it2(head_pe->subpartitions);
      while ((pe= it2++))
      {
        if (bitmap_is_set(&part_info->used_partitions, partition_id))
        {
          if (parts_str->length())
            parts_str->append(',');
          parts_str->append(head_pe->partition_name,
                            strlen(head_pe->partition_name),
                            system_charset_info);
          parts_str->append('_');
          parts_str->append(pe->partition_name,
                            strlen(pe->partition_name),
                            system_charset_info);
        }
        partition_id++;
      }
    }
  }
  else
  {
    while ((pe= it++))
    {
      if (bitmap_is_set(&part_info->used_partitions, partition_id))
      {
        if (parts_str->length())
          parts_str->append(',');
        parts_str->append(pe->partition_name, strlen(pe->partition_name),
                          system_charset_info);
      }
      partition_id++;
    }
  }
}
@@ -633,6 +633,21 @@ JOIN::optimize()
      DBUG_RETURN(0);
    }
  }

#ifdef WITH_PARTITION_STORAGE_ENGINE
  {
    TABLE_LIST *tbl;
    for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
    {
      if (!tbl->embedding)
      {
        Item *prune_cond= tbl->on_expr ? tbl->on_expr : conds;
        tbl->table->no_partitions_used= prune_partitions(thd, tbl->table,
                                                         prune_cond);
      }
    }
  }
#endif

  /* Optimize count(*), min() and max() */
  if (tables_list && tmp_table_param.sum_func_count && ! group_list)
  {
@@ -2018,7 +2033,11 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
    if (*s->on_expr_ref)
    {
      /* s is the only inner table of an outer join */
#ifdef WITH_PARTITION_STORAGE_ENGINE
      if ((!table->file->records || table->no_partitions_used) && !embedding)
#else
      if (!table->file->records && !embedding)
#endif
      {						// Empty table
        s->dependent= 0;                        // Ignore LEFT JOIN depend.
        set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2045,8 +2064,14 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds,
      while (embedding);
      continue;
    }
#ifdef WITH_PARTITION_STORAGE_ENGINE
    bool no_partitions_used= table->no_partitions_used;
#else
    const bool no_partitions_used= FALSE;
#endif
    if ((table->s->system || table->file->records <= 1 ||
         no_partitions_used) &&
        !s->dependent &&
        !(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
        !table->fulltext_searched)
    {
@@ -13767,6 +13792,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
                                        strlen(join->select_lex->type), cs));
    for (uint i=0 ; i < 7; i++)
      item_list.push_back(item_null);
    if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
      item_list.push_back(item_null);
    item_list.push_back(new Item_string(message,strlen(message),cs));
    if (result->send_data(item_list))
      join->error= 1;
@@ -13887,7 +13915,28 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
      item_list.push_back(new Item_string(table->alias,
                                          strlen(table->alias),
                                          cs));
      /* "partitions" column */
      if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
      {
#ifdef WITH_PARTITION_STORAGE_ENGINE
        partition_info *part_info;
        if (!table->derived_select_number &&
            (part_info= table->part_info))
        {
          char parts_buff[128];
          String parts_str(parts_buff,sizeof(parts_buff),cs);
          make_used_partitions_str(part_info, &parts_str);
          item_list.push_back(new Item_string(parts_str.ptr(),
                                              parts_str.length(), cs));
        }
        else
          item_list.push_back(item_null);
#else
        /* just produce empty column if partitioning is not compiled in */
        item_list.push_back(item_null);
#endif
      }
      /* "type" column */
      item_list.push_back(new Item_string(join_type_str[tab->type],
                                          strlen(join_type_str[tab->type]),
                                          cs));
......
@@ -7380,8 +7380,10 @@ describe_command:
opt_extended_describe:
	/* empty */ {}
	| EXTENDED_SYM   { Lex->describe|= DESCRIBE_EXTENDED; }
	| PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
	;

opt_describe_column:
	/* empty */ {}
	| text_string { Lex->wild= $1; }
......