Commit 437b5e5f authored by joerg@debian.(none)

Merge jbruehe@bk-internal.mysql.com:/home/bk/mysql-5.1-new

into debian.(none):/M51/mysql-5.1
parents 922da5ea 781820c0
......@@ -218,7 +218,7 @@ t2 CREATE TABLE `t2` (
`b2` int(11) NOT NULL,
`c2` int(11) NOT NULL,
PRIMARY KEY (`pk2`)
) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
SHOW CREATE TABLE test.t1;
Table Create Table
t1 CREATE TABLE `t1` (
......@@ -226,7 +226,7 @@ t1 CREATE TABLE `t1` (
`b` int(11) NOT NULL,
`c` int(11) NOT NULL,
PRIMARY KEY (`pk1`)
) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK
ENGINE=NDB;
SHOW CREATE TABLE test.t2;
......@@ -236,7 +236,7 @@ t2 CREATE TABLE `t2` (
`b2` int(11) NOT NULL,
`c2` int(11) NOT NULL,
PRIMARY KEY (`pk2`)
) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
ALTER TABLE test.t1 ENGINE=NDBCLUSTER;
SHOW CREATE TABLE test.t1;
Table Create Table
......@@ -245,7 +245,7 @@ t1 CREATE TABLE `t1` (
`b` int(11) NOT NULL,
`c` int(11) NOT NULL,
PRIMARY KEY (`pk1`)
) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY ()
DROP TABLE test.t1;
DROP TABLE test.t2;
......
......@@ -271,3 +271,10 @@ t1 CREATE TABLE `t1` (
`b` int(11) default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (a) (PARTITION x1 VALUES LESS THAN (6) ENGINE = MyISAM, PARTITION x3 VALUES LESS THAN (8) ENGINE = MyISAM, PARTITION x4 VALUES LESS THAN (10) ENGINE = MyISAM, PARTITION x5 VALUES LESS THAN (12) ENGINE = MyISAM, PARTITION x6 VALUES LESS THAN (14) ENGINE = MyISAM, PARTITION x7 VALUES LESS THAN (16) ENGINE = MyISAM, PARTITION x8 VALUES LESS THAN (18) ENGINE = MyISAM, PARTITION x9 VALUES LESS THAN (20) ENGINE = MyISAM)
drop table t1;
create table t1 (a int not null, b int not null) partition by LIST (a+b) (
partition p0 values in (12),
partition p1 values in (14)
);
insert into t1 values (10,1);
ERROR HY000: Table has no partition for value 11
drop table t1;
......@@ -259,3 +259,48 @@ explain partitions select * from t1 where a is not null;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0,p1 ALL NULL NULL NULL NULL 2 Using where
drop table t1;
create table t1 (a int not null, b int not null, key(a), key(b))
partition by hash(a) partitions 4;
insert into t1 values (1,1),(2,2),(3,3),(4,4);
explain partitions
select * from t1 X, t1 Y
where X.b = Y.b and (X.a=1 or X.a=2) and (Y.a=2 or Y.a=3);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE X p1,p2 ALL a,b NULL NULL NULL 4 Using where
1 SIMPLE Y p2,p3 ref a,b b 4 test.X.b 2 Using where
explain partitions
select * from t1 X, t1 Y where X.a = Y.a and (X.a=1 or X.a=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE X p1,p2 ALL a NULL NULL NULL 4 Using where
1 SIMPLE Y p1,p2 ref a a 4 test.X.a 2
drop table t1;
create table t1 (a int) partition by hash(a) partitions 20;
insert into t1 values (1),(2),(3);
explain partitions select * from t1 where a > 1 and a < 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2 ALL NULL NULL NULL NULL 3 Using where
explain partitions select * from t1 where a >= 1 and a < 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1,p2 ALL NULL NULL NULL NULL 3 Using where
explain partitions select * from t1 where a > 1 and a <= 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2,p3 ALL NULL NULL NULL NULL 3 Using where
explain partitions select * from t1 where a >= 1 and a <= 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1,p2,p3 ALL NULL NULL NULL NULL 3 Using where
drop table t1;
create table t1 (a int, b int)
partition by list(a) subpartition by hash(b) subpartitions 20
(
partition p0 values in (0),
partition p1 values in (1),
partition p2 values in (2),
partition p3 values in (3)
);
insert into t1 values (1,1),(2,2),(3,3);
explain partitions select * from t1 where b > 1 and b < 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p0_sp2,p1_sp2,p2_sp2,p3_sp2 ALL NULL NULL NULL NULL 3 Using where
explain partitions select * from t1 where b > 1 and b < 3 and (a =1 or a =2);
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1_sp2,p2_sp2 ALL NULL NULL NULL NULL 3 Using where
......@@ -24,7 +24,7 @@ partition_03ndb : Bug#16385
ndb_binlog_basic : Results are not deterministic, Tomas will fix
rpl_ndb_basic : Bug#16228
rpl_sp : Bug #16456
ndb_dd_disk2memory : Bug #16466 & 16736
#ndb_dd_disk2memory : Bug #16466
ndb_autodiscover : Needs to be fixed w.r.t binlog
ndb_autodiscover2 : Needs to be fixed w.r.t binlog
system_mysql_db : Needs fixing
......
......@@ -343,3 +343,13 @@ ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO
show create table t1;
drop table t1;
# Testcase for BUG#15819
create table t1 (a int not null, b int not null) partition by LIST (a+b) (
partition p0 values in (12),
partition p1 values in (14)
);
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
insert into t1 values (10,1);
drop table t1;
......@@ -230,9 +230,45 @@ create table t1 (a int) partition by hash(a) partitions 2;
insert into t1 values (1),(2);
explain partitions select * from t1 where a is null;
# this selects both
# this uses both partitions
explain partitions select * from t1 where a is not null;
drop table t1;
# Join tests
create table t1 (a int not null, b int not null, key(a), key(b))
partition by hash(a) partitions 4;
insert into t1 values (1,1),(2,2),(3,3),(4,4);
explain partitions
select * from t1 X, t1 Y
where X.b = Y.b and (X.a=1 or X.a=2) and (Y.a=2 or Y.a=3);
explain partitions
select * from t1 X, t1 Y where X.a = Y.a and (X.a=1 or X.a=2);
drop table t1;
# Tests for "short ranges"
create table t1 (a int) partition by hash(a) partitions 20;
insert into t1 values (1),(2),(3);
explain partitions select * from t1 where a > 1 and a < 3;
explain partitions select * from t1 where a >= 1 and a < 3;
explain partitions select * from t1 where a > 1 and a <= 3;
explain partitions select * from t1 where a >= 1 and a <= 3;
drop table t1;
create table t1 (a int, b int)
partition by list(a) subpartition by hash(b) subpartitions 20
(
partition p0 values in (0),
partition p1 values in (1),
partition p2 values in (2),
partition p3 values in (3)
);
insert into t1 values (1,1),(2,2),(3,3);
explain partitions select * from t1 where b > 1 and b < 3;
explain partitions select * from t1 where b > 1 and b < 3 and (a =1 or a =2);
# No tests for NULLs in RANGE(monotonic_expr()) - they depend on BUG#15447
# being fixed.
......@@ -43,7 +43,7 @@ c_tzn="" c_tz="" c_tzt="" c_tztt="" c_tzls="" c_pl=""
i_tzn="" i_tz="" i_tzt="" i_tztt="" i_tzls="" i_pl=""
c_p="" c_pp=""
c_gl="" c_sl=""
c_ev= ""
c_ev=""
# Check for old tables
if test ! -f $mdata/db.frm
......
......@@ -620,6 +620,8 @@ typedef struct {
uint32 end_part;
bool use_bit_array;
} part_id_range;
/**
* An enum and a struct to handle partitioning and subpartitioning.
*/
......@@ -699,7 +701,109 @@ typedef int (*get_part_id_func)(partition_info *part_info,
longlong *func_value);
typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
class partition_info :public Sql_alloc {
struct st_partition_iter;
#define NOT_A_PARTITION_ID ((uint32)-1)
/*
A "Get next" function for partition iterator.
SYNOPSIS
partition_iter_func()
part_iter Partition iterator; the only call made on it is "iter.get_next(&iter)"
RETURN
NOT_A_PARTITION_ID if there are no more partitions.
[sub]partition_id of the next partition
*/
typedef uint32 (*partition_iter_func)(st_partition_iter* part_iter);
/*
Partition set iterator. Used to enumerate a set of [sub]partitions
obtained in partition interval analysis (see get_partitions_in_range_iter).
For the user, the only meaningful field is get_next, which may be used as
follows:
part_iterator.get_next(&part_iterator);
Initialization is done by any of the following calls:
- get_partitions_in_range_iter-type function call
- init_single_partition_iterator()
- init_all_partitions_iterator()
Cleanup is not needed.
*/
typedef struct st_partition_iter
{
partition_iter_func get_next;
union {
struct {
uint32 start_part_num;
uint32 end_part_num;
};
struct {
longlong start_val;
longlong end_val;
};
bool null_returned;
};
partition_info *part_info;
} PARTITION_ITERATOR;
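To make the usage note above concrete, here is a minimal enumeration sketch; where part_iter was initialized and what is done per partition are assumptions for illustration, not part of this change.
  /*
    Illustrative only: part_iter is assumed to have been initialized by one
    of the calls listed above; no cleanup is needed afterwards.
  */
  uint32 part_id;
  while ((part_id= part_iter.get_next(&part_iter)) != NOT_A_PARTITION_ID)
  {
    /* process [sub]partition part_id, e.g. mark it as used for the query */
  }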
/*
Get an iterator for set of partitions that match given field-space interval
SYNOPSIS
get_partitions_in_range_iter()
part_info Partitioning info
is_subpart
min_val Left edge, field value in opt_range_key format.
max_val Right edge, field value in opt_range_key format.
flags Some combination of NEAR_MIN, NEAR_MAX, NO_MIN_RANGE,
NO_MAX_RANGE.
part_iter Iterator structure to be initialized
DESCRIPTION
Functions with this signature are used to perform "Partitioning Interval
Analysis". This analysis is applicable for any type of [sub]partitioning
by some function of a single fieldX. The idea is as follows:
Given an interval "const1 <=? fieldX <=? const2", find a set of partitions
that may contain records with value of fieldX within the given interval.
The min_val, max_val and flags parameters specify the interval.
The set of partitions is returned by initializing an iterator in *part_iter
NOTES
There are currently two functions of this type:
- get_part_iter_for_interval_via_walking
- get_part_iter_for_interval_via_mapping
RETURN
0 - No matching partitions, iterator not initialized
1 - Some partitions would match, iterator initialized for traversing them
-1 - All partitions would match, iterator not initialized
*/
typedef int (*get_partitions_in_range_iter)(partition_info *part_info,
bool is_subpart,
byte *min_val, byte *max_val,
uint flags,
PARTITION_ITERATOR *part_iter);
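A hedged sketch of how pruning code might act on the 0 / 1 / -1 contract documented above; it assumes the partition_info members declared further down in this file, and the enclosing function is invented for illustration.
  static bool prune_by_interval(partition_info *part_info,
                                byte *min_val, byte *max_val, uint flags)
  {
    PARTITION_ITERATOR part_iter;
    int res= part_info->get_part_iter_for_interval(part_info,
                                                   FALSE,        /* is_subpart */
                                                   min_val, max_val, flags,
                                                   &part_iter);
    if (res == 0)
      return FALSE;              /* no partition can match: prune them all */
    if (res == -1)
      init_all_partitions_iterator(part_info, &part_iter); /* nothing pruned */
    /* res == 1: part_iter was initialized by the analysis function itself;
       traverse the resulting set with get_next() as in the sketch above. */
    return TRUE;
  }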
/* Initialize the iterator to return a single partition with given part_id */
inline void init_single_partition_iterator(uint32 part_id,
PARTITION_ITERATOR *part_iter);
/* Initialize the iterator to enumerate all partitions */
inline void init_all_partitions_iterator(partition_info *part_info,
PARTITION_ITERATOR *part_iter);
class partition_info : public Sql_alloc
{
public:
/*
* Here comes a set of definitions needed for partitioned table handlers.
......@@ -728,10 +832,10 @@ public:
same in all subpartitions
*/
get_subpart_id_func get_subpartition_id;
/* NULL-terminated list of fields used in partitioned expression */
/* NULL-terminated array of fields used in partitioned expression */
Field **part_field_array;
/* NULL-terminated list of fields used in subpartitioned expression */
/* NULL-terminated array of fields used in subpartitioned expression */
Field **subpart_field_array;
/*
......@@ -748,11 +852,10 @@ public:
/*
A bitmap of partitions used by the current query.
Usage pattern:
* It is guaranteed that all partitions are set to be unused on query start.
* The handler->extra(HA_EXTRA_RESET) call at query start/end sets all
partitions to be unused.
* Before index/rnd_init(), partition pruning code sets the bits for used
partitions.
* The handler->extra(HA_EXTRA_RESET) call at query end sets all partitions
to be unused.
*/
MY_BITMAP used_partitions;
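A small sketch of the protocol described in the comment above; part_id and the calling context are assumptions for illustration, while bitmap_set_bit()/bitmap_is_set() are the ordinary MY_BITMAP helpers.
  /* handler->extra(HA_EXTRA_RESET) at statement start/end leaves the map
     all-clear; pruning then marks the partitions it decides to read: */
  bitmap_set_bit(&part_info->used_partitions, part_id);  /* before index/rnd_init() */
  if (bitmap_is_set(&part_info->used_partitions, part_id))
  {
    /* the partition handler scans this partition */
  }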
......@@ -760,6 +863,39 @@ public:
longlong *range_int_array;
LIST_PART_ENTRY *list_array;
};
/********************************************
* INTERVAL ANALYSIS
********************************************/
/*
Partitioning interval analysis function for partitioning, or NULL if
interval analysis is not supported for this kind of partitioning.
*/
get_partitions_in_range_iter get_part_iter_for_interval;
/*
Partitioning interval analysis function for subpartitioning, or NULL if
interval analysis is not supported for this kind of partitioning.
*/
get_partitions_in_range_iter get_subpart_iter_for_interval;
/*
Valid iff
get_part_iter_for_interval=get_part_iter_for_interval_via_walking:
controls how we'll process "field < C" and "field > C" intervals.
If the partitioning function F is strictly increasing, then for any x, y
"x < y" => "F(x) < F(y)" (*), i.e. when we get interval "field < C"
we can perform partition pruning on the equivalent "F(field) < F(C)".
If the partitioning function is not strictly increasing (it is simply
increasing), then instead of (*) we get "x < y" => "F(x) <= F(y)"
i.e. for interval "field < C" we can perform partition pruning for
"F(field) <= F(C)".
*/
bool range_analysis_include_bounds;
/********************************************
* INTERVAL ANALYSIS ENDS
********************************************/
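To make the bounds discussion above concrete, a small worked example with an invented partitioning function:
  /*
    Illustration (invented F, not from this patch):
      F(x) = x DIV 10 is increasing but not strictly increasing.
      For the interval "field < 25" we must NOT prune with the strict
      "F(field) < F(25)" (= 2): the row field=23 satisfies field < 25,
      yet F(23) = 2, so the correct pruning condition is "F(field) <= 2".
      If F were strictly increasing, e.g. F(x) = x + 10, then "field < 25"
      could safely be pruned with the strict "F(field) < 35".
    range_analysis_include_bounds records which of the two cases applies.
  */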
char* part_info_string;
char *part_func_string;
......@@ -863,6 +999,25 @@ public:
#ifdef WITH_PARTITION_STORAGE_ENGINE
uint32 get_next_partition_id_range(struct st_partition_iter* part_iter);
inline void init_single_partition_iterator(uint32 part_id,
PARTITION_ITERATOR *part_iter)
{
part_iter->start_part_num= part_id;
part_iter->end_part_num= part_id+1;
part_iter->get_next= get_next_partition_id_range;
}
inline
void init_all_partitions_iterator(partition_info *part_info,
PARTITION_ITERATOR *part_iter)
{
part_iter->start_part_num= 0;
part_iter->end_part_num= part_info->no_parts;
part_iter->get_next= get_next_partition_id_range;
}
/*
Answers the question if subpartitioning is used for a certain table
SYNOPSIS
......
......@@ -381,13 +381,20 @@ public:
put values of field_i into table record buffer;
return item->val_int();
}
NOTE
At the moment function monotonicity is not well defined (and so may be
incorrect) for Item trees with parameters/return types that are different
from INT_RESULT, may be NULL, or are unsigned.
It will be possible to address this issue once the related partitioning bugs
(BUG#16002, BUG#15447, BUG#13436) are fixed.
*/
typedef enum monotonicity_info
{
NON_MONOTONIC, /* none of the below holds */
MONOTONIC_INCREASING, /* F() is unary and "x < y" => "F(x) < F(y)" */
MONOTONIC_STRICT_INCREASING /* F() is unary and "x < y" => "F(x) <= F(y)" */
MONOTONIC_INCREASING, /* F() is unary and (x < y) => (F(x) <= F(y)) */
MONOTONIC_STRICT_INCREASING /* F() is unary and (x < y) => (F(x) < F(y)) */
} enum_monotonicity_info;
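One plausible caller-side use of this enum, sketched for illustration; the assignment mirrors the range_analysis_include_bounds comment in partition_info, but the surrounding code is an assumption, not part of this patch.
  enum_monotonicity_info mi= item->get_monotonicity_info();
  if (mi == NON_MONOTONIC)
  {
    /* interval mapping cannot be used for this partitioning expression */
  }
  else
  {
    /* keep the interval bounds inclusive unless F is strictly increasing */
    part_info->range_analysis_include_bounds= (mi == MONOTONIC_INCREASING);
  }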
/*************************************************************************/
......
......@@ -885,6 +885,21 @@ longlong Item_func_to_days::val_int()
return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
}
/*
Get information about this Item tree monotonicity
SYNOPSIS
Item_func_to_days::get_monotonicity_info()
DESCRIPTION
Get information about monotonicity of the function represented by this item
tree.
RETURN
See enum_monotonicity_info.
*/
enum_monotonicity_info Item_func_to_days::get_monotonicity_info() const
{
if (args[0]->type() == Item::FIELD_ITEM)
......@@ -1080,6 +1095,21 @@ longlong Item_func_year::val_int()
return (longlong) ltime.year;
}
/*
Get information about this Item tree monotonicity
SYNOPSIS
Item_func_year::get_monotonicity_info()
DESCRIPTION
Get information about monotonicity of the function represented by this item
tree.
RETURN
See enum_monotonicity_info.
*/
enum_monotonicity_info Item_func_year::get_monotonicity_info() const
{
if (args[0]->type() == Item::FIELD_ITEM &&
......
......@@ -6523,24 +6523,24 @@ static int show_ssl_ctx_get_session_cache_mode(THD *thd, SHOW_VAR *var, char *bu
{
var->type= SHOW_CHAR;
if (!ssl_acceptor_fd)
var->value= "NONE";
var->value= const_cast<char*>("NONE");
else
switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context))
{
case SSL_SESS_CACHE_OFF:
var->value= "OFF"; break;
var->value= const_cast<char*>("OFF"); break;
case SSL_SESS_CACHE_CLIENT:
var->value= "CLIENT"; break;
var->value= const_cast<char*>("CLIENT"); break;
case SSL_SESS_CACHE_SERVER:
var->value= "SERVER"; break;
var->value= const_cast<char*>("SERVER"); break;
case SSL_SESS_CACHE_BOTH:
var->value= "BOTH"; break;
var->value= const_cast<char*>("BOTH"); break;
case SSL_SESS_CACHE_NO_AUTO_CLEAR:
var->value= "NO_AUTO_CLEAR"; break;
var->value= const_cast<char*>("NO_AUTO_CLEAR"); break;
case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP:
var->value= "NO_INTERNAL_LOOKUP"; break;
var->value= const_cast<char*>("NO_INTERNAL_LOOKUP"); break;
default:
var->value= "Unknown"; break;
var->value= const_cast<char*>("Unknown"); break;
}
return 0;
}
......@@ -6561,6 +6561,7 @@ static int show_ssl_session_reused(THD *thd, SHOW_VAR *var, char *buff)
*((long *)buff)= (long)thd->net.vio->ssl_arg ?
SSL_session_reused((SSL*) thd->net.vio->ssl_arg) :
0;
return 0;
}
static int show_ssl_get_default_timeout(THD *thd, SHOW_VAR *var, char *buff)
......
......@@ -721,6 +721,7 @@ uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit);
#ifdef WITH_PARTITION_STORAGE_ENGINE
bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
void store_key_image_to_rec(Field *field, char *ptr, uint len);
#endif
#endif
......@@ -224,7 +224,8 @@ sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name,
File_option *param;
DBUG_ENTER("sql_create_definition_file");
DBUG_PRINT("enter", ("Dir: %s, file: %s, base 0x%lx",
dir->str, file_name->str, (ulong) base));
dir ? dir->str : "(null)",
file_name->str, (ulong) base));
if (dir)
{
......
......@@ -785,7 +785,10 @@ int THD::send_explain_fields(select_result *result)
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (lex->describe & DESCRIBE_PARTITIONS)
{
field_list.push_back(item= new Item_empty_string("partitions", 10, cs));
/* Maximum length of string that make_used_partitions_str() can produce */
item= new Item_empty_string("partitions", MAX_PARTITIONS * (1 + FN_LEN),
cs);
field_list.push_back(item);
item->maybe_null= 1;
}
#endif
......
......@@ -112,7 +112,7 @@ enum enum_sql_command {
#define DESCRIBE_NORMAL 1
#define DESCRIBE_EXTENDED 2
/*
This is not #ifdef'ed because we want "EXPLAIN PARTITIONS ..." to produce
This is not within #ifdef because we want "EXPLAIN PARTITIONS ..." to produce
additional "partitions" column even if partitioning is not compiled in.
*/
#define DESCRIBE_PARTITIONS 4
......
......@@ -639,6 +639,11 @@ JOIN::optimize()
TABLE_LIST *tbl;
for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
{
/*
If tbl->embedding!=NULL that means that this table is in the inner
part of the nested outer join, and we can't do partition pruning
(TODO: check if this limitation can be lifted)
*/
if (!tbl->embedding)
{
Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
......
......@@ -1415,7 +1415,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
memcpy((char*) tmp_table->field[0]->ptr,
(char*) table->file->ref, table->file->ref_length);
/* Write row, ignoring duplicated updates to a row */
if (error= tmp_table->file->ha_write_row(tmp_table->record[0]))
if ((error= tmp_table->file->ha_write_row(tmp_table->record[0])))
{
if (error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE &&
......
......@@ -270,7 +270,32 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags)
strxmov(path, share->normalized_path.str, reg_ext, NullS);
if ((file= my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0)
goto err_not_open;
{
/* Try unencoded 5.0 name */
uint length;
strxnmov(path, sizeof(path)-1,
mysql_data_home, "/", share->db.str, "/",
share->table_name.str, reg_ext, NullS);
length= unpack_filename(path, path) - reg_ext_length;
/*
The following is a safety test and should never fail
as the old file name should never be longer than the new one.
*/
DBUG_ASSERT(length <= share->normalized_path.length);
/*
If the old and the new names have the same length,
then table name does not have tricky characters,
so no need to check the old file name.
*/
if (length == share->normalized_path.length ||
((file= my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0))
goto err_not_open;
/* Unencoded 5.0 table name found */
path[length]= '\0'; // Remove .frm extension
strmov(share->normalized_path.str, path);
share->normalized_path.length= length;
}
error= 4;
if (my_read(file,(byte*) head, 64, MYF(MY_NABP)))
......