WL#2506: Information Schema tables for PARTITIONing

  added I_S 'PARTITIONS' table
parent 12fcc105
@@ -43,6 +43,7 @@ COLUMNS
COLUMN_PRIVILEGES
ENGINES
KEY_COLUMN_USAGE
PARTITIONS
PLUGINS
ROUTINES
SCHEMATA
@@ -725,7 +726,7 @@ CREATE TABLE t_crashme ( f1 BIGINT);
CREATE VIEW a1 (t_CRASHME) AS SELECT f1 FROM t_crashme GROUP BY f1;
CREATE VIEW a2 AS SELECT t_CRASHME FROM a1;
count(*)
-104
+105
drop view a2, a1;
drop table t_crashme;
select table_schema,table_name, column_name from
@@ -733,6 +734,9 @@ information_schema.columns
where data_type = 'longtext';
table_schema table_name column_name
information_schema COLUMNS COLUMN_TYPE
information_schema PARTITIONS PARTITION_EXPRESSION
information_schema PARTITIONS SUBPARTITION_EXPRESSION
information_schema PARTITIONS PARTITION_DESCRIPTION
information_schema PLUGINS PLUGIN_DESCRIPTION
information_schema ROUTINES ROUTINE_DEFINITION
information_schema ROUTINES SQL_MODE
@@ -744,6 +748,9 @@ information_schema VIEWS VIEW_DEFINITION
select table_name, column_name, data_type from information_schema.columns
where data_type = 'datetime';
table_name column_name data_type
PARTITIONS CREATE_TIME datetime
PARTITIONS UPDATE_TIME datetime
PARTITIONS CHECK_TIME datetime
ROUTINES CREATED datetime
ROUTINES LAST_ALTERED datetime
TABLES CREATE_TIME datetime
@@ -786,6 +793,7 @@ TABLE_NAME COLUMN_NAME PRIVILEGES
COLUMNS TABLE_NAME select
COLUMN_PRIVILEGES TABLE_NAME select
KEY_COLUMN_USAGE TABLE_NAME select
PARTITIONS TABLE_NAME select
STATISTICS TABLE_NAME select
TABLES TABLE_NAME select
TABLE_CONSTRAINTS TABLE_NAME select
@@ -796,7 +804,7 @@ delete from mysql.db where user='mysqltest_4';
flush privileges;
SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHEMA;
table_schema count(*)
-information_schema 18
+information_schema 19
mysql 18
create table t1 (i int, j int);
create trigger trg1 before insert on t1 for each row
......
@@ -8,6 +8,7 @@ COLUMNS
COLUMN_PRIVILEGES
ENGINES
KEY_COLUMN_USAGE
PARTITIONS
PLUGINS
ROUTINES
SCHEMATA
......
drop table if exists t1,t2,t3,t4;
create table t1 (a int not null,b int not null,c int not null, primary key(a,b))
partition by list (b*a)
(partition x1 values in (1) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 NULL 1 NULL LIST NULL b*a NULL 1 0 0 0 # 1024 0 # # NULL NULL default 0 ts1
NULL test t1 x2 NULL 2 NULL LIST NULL b*a NULL 3,11,5,7 0 0 0 # 1024 0 # # NULL NULL default 0 ts2
NULL test t1 x3 NULL 3 NULL LIST NULL b*a NULL 16,8,24,27 0 0 0 # 1024 0 # # NULL NULL default 0 ts3
create table t2 (a int not null,b int not null,c int not null, primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
select * from information_schema.partitions where table_schema="test"
and table_name="t2";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t2 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 1024 0 # # NULL NULL default 0 ts1
NULL test t2 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 1024 0 # # NULL NULL default 0 ts2
NULL test t2 x3 NULL 3 NULL RANGE NULL a NULL MAXVALUE 0 0 0 # 1024 0 # # NULL NULL default 0 ts3
create table t3 (f1 date)
partition by hash(month(f1))
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t3";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t3 p0 NULL 1 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t3 p1 NULL 2 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t3 p2 NULL 3 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
create table t4 (f1 date, f2 int)
partition by key(f1,f2)
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t4";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t4 p0 NULL 1 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t4 p1 NULL 2 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t4 p2 NULL 3 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
drop table t1,t2,t3,t4;
create table t1 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
create table t2 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by key (a)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 x11 1 1 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x1 x12 1 2 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t2
NULL test t1 x2 x21 2 1 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x2 x22 2 2 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t2
NULL test t2 x1 x11 1 1 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t2 x1 x12 1 2 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default 0 t2
NULL test t2 x2 x21 2 1 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t2 x2 x22 2 2 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default 0 t2
drop table t1,t2;
create table t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 nodegroup 0,
subpartition x12 tablespace t2 nodegroup 1),
partition x2 values less than (5)
( subpartition x21 tablespace t1 nodegroup 0,
subpartition x22 tablespace t2 nodegroup 1)
);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 x11 1 1 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x1 x12 1 2 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 1 t2
NULL test t1 x2 x21 2 1 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x2 x22 2 2 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 1 t2
show tables;
Tables_in_test
t1
drop table t1;
create table t1(f1 int, f2 int);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 NULL NULL NULL NULL NULL NULL NULL NULL NULL 0 0 0 # 1024 0 # # NULL NULL 0
drop table t1;
create table t1 (f1 date)
partition by linear hash(month(f1))
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 p0 NULL 1 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t1 p1 NULL 2 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t1 p2 NULL 3 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
drop table t1;
@@ -15,6 +15,11 @@ INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from information_schema.partitions where table_name= 't1';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 0 0 # # NULL NULL default 0 default
NULL test t1 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 0 0 # # NULL NULL default 0 default
NULL test t1 x3 NULL 3 NULL RANGE NULL a NULL 20 0 0 0 # 0 0 # # NULL NULL default 0 default
select * from t1 order by a;
a b c
1 1 1
......
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1,t2,t3,t4;
--enable_warnings
create table t1 (a int not null,b int not null,c int not null, primary key(a,b))
partition by list (b*a)
(partition x1 values in (1) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
create table t2 (a int not null,b int not null,c int not null, primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t2";
create table t3 (f1 date)
partition by hash(month(f1))
partitions 3;
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t3";
create table t4 (f1 date, f2 int)
partition by key(f1,f2)
partitions 3;
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t4";
drop table t1,t2,t3,t4;
create table t1 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
create table t2 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by key (a)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test";
drop table t1,t2;
create table t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 nodegroup 0,
subpartition x12 tablespace t2 nodegroup 1),
partition x2 values less than (5)
( subpartition x21 tablespace t1 nodegroup 0,
subpartition x22 tablespace t2 nodegroup 1)
);
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test";
show tables;
drop table t1;
create table t1(f1 int, f2 int);
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test";
drop table t1;
create table t1 (f1 date)
partition by linear hash(month(f1))
partitions 3;
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
drop table t1;
@@ -32,6 +32,9 @@ INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_name= 't1';
select * from t1 order by a;
select * from t1 where a=1 order by a;
......
@@ -3122,6 +3122,20 @@ void ha_ndbcluster::info(uint flag)
DBUG_VOID_RETURN;
}
void ha_ndbcluster::get_dynamic_partition_info(PARTITION_INFO *stat_info,
uint part_id)
{
/*
  This function should be fixed. Suggested fix: implement an NDB
  function which retrieves the statistics about NDB partitions.
*/
bzero((char*) stat_info, sizeof(PARTITION_INFO));
return;
}
int ha_ndbcluster::extra(enum ha_extra_function operation)
{
DBUG_ENTER("extra");
......
@@ -522,6 +522,7 @@ class ha_ndbcluster: public handler
bool get_error_message(int error, String *buf);
void info(uint);
void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
int extra(enum ha_extra_function operation);
int extra_opt(enum ha_extra_function operation, ulong cache_size);
int external_lock(THD *thd, int lock_type);
......
@@ -2444,6 +2444,29 @@ void ha_partition::info(uint flag)
}
void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
uint part_id)
{
handler *file= m_file[part_id];
file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK);
stat_info->records= file->records;
stat_info->mean_rec_length= file->mean_rec_length;
stat_info->data_file_length= file->data_file_length;
stat_info->max_data_file_length= file->max_data_file_length;
stat_info->index_file_length= file->index_file_length;
stat_info->delete_length= file->delete_length;
stat_info->create_time= file->create_time;
stat_info->update_time= file->update_time;
stat_info->check_time= file->check_time;
stat_info->check_sum= 0;
if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
stat_info->check_sum= file->checksum();
return;
}
/*
  extra() is called whenever the server wishes to send a hint to
  the storage engine. The MyISAM engine implements the most hints.
......
@@ -18,6 +18,11 @@
#pragma interface /* gcc class implementation */
#endif
enum partition_keywords
{
PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR
};
/*
  PARTITION_SHARE is a structure that will be shared amoung all open handlers
  The partition implements the minimum of what you will probably need.
@@ -408,6 +413,8 @@ class ha_partition :public handler
-------------------------------------------------------------------------
*/
virtual void info(uint);
void get_dynamic_partition_info(PARTITION_INFO *stat_info,
uint part_id);
virtual int extra(enum ha_extra_function operation);
virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
virtual int reset(void);
......
@@ -2096,6 +2096,26 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
}
void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id)
{
info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK);
stat_info->records= records;
stat_info->mean_rec_length= mean_rec_length;
stat_info->data_file_length= data_file_length;
stat_info->max_data_file_length= max_data_file_length;
stat_info->index_file_length= index_file_length;
stat_info->delete_length= delete_length;
stat_info->create_time= create_time;
stat_info->update_time= update_time;
stat_info->check_time= check_time;
stat_info->check_sum= 0;
if (table_flags() & (ulong) HA_HAS_CHECKSUM)
stat_info->check_sum= checksum();
return;
}
/****************************************************************************
** Some general functions that isn't in the handler class
****************************************************************************/
......
@@ -494,6 +494,19 @@ enum partition_state {
PART_ADDED= 6
};
typedef struct {
ulonglong data_file_length;
ulonglong max_data_file_length;
ulonglong index_file_length;
ulonglong delete_length;
ha_rows records;
ulong mean_rec_length;
time_t create_time;
time_t check_time;
time_t update_time;
ulonglong check_sum;
} PARTITION_INFO;
#define UNDEF_NODEGROUP 65535
class Item;
@@ -1229,6 +1242,8 @@ class handler :public Sql_alloc
{ return (ha_rows) 10; }
virtual void position(const byte *record)=0;
virtual void info(uint)=0; // see my_base.h for full description
virtual void get_dynamic_partition_info(PARTITION_INFO *stat_info,
uint part_id);
virtual int extra(enum ha_extra_function operation)
{ return 0; }
virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
......
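
For orientation, the sketch below shows how a caller could use the new handler::get_dynamic_partition_info() hook together with the PARTITION_INFO struct added above to gather statistics for each partition. It is illustrative only and not part of this changeset; the helper name collect_partition_stats and the no_parts parameter are hypothetical.

/*
  Illustrative sketch only (not part of the patch): loop over the
  partitions of an open handler and let it fill a PARTITION_INFO with
  row counts, file lengths, timestamps and checksum for each one.
*/
static void collect_partition_stats(handler *file, uint no_parts)
{
  PARTITION_INFO stat_info;
  for (uint part_id= 0; part_id < no_parts; part_id++)
  {
    file->get_dynamic_partition_info(&stat_info, part_id);
    /* ... copy stat_info fields into an INFORMATION_SCHEMA row ... */
  }
}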
@@ -889,6 +889,7 @@ void free_status_vars();
/* information schema */
extern LEX_STRING information_schema_name;
const extern LEX_STRING partition_keywords[];
LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
                            const char* str, uint length,
                            bool allocate_lex_string);
......
@@ -43,13 +43,18 @@
/*
  Partition related functions declarations and some static constants;
*/
-static const char *hash_str= "HASH";
-static const char *range_str= "RANGE";
-static const char *list_str= "LIST";
+const LEX_STRING partition_keywords[]=
+{
+  { (char *) STRING_WITH_LEN("HASH") },
+  { (char *) STRING_WITH_LEN("RANGE") },
+  { (char *) STRING_WITH_LEN("LIST") },
+  { (char *) STRING_WITH_LEN("KEY") },
+  { (char *) STRING_WITH_LEN("MAXVALUE") },
+  { (char *) STRING_WITH_LEN("LINEAR ") }
+};
static const char *part_str= "PARTITION";
static const char *sub_str= "SUB";
static const char *by_str= "BY";
-static const char *key_str= "KEY";
static const char *space_str= " ";
static const char *equal_str= "=";
static const char *end_paren_str= ")";
@@ -629,9 +634,9 @@ static bool set_up_default_partitions(partition_info *part_info,
{
const char *error_string;
if (part_info->part_type == RANGE_PARTITION)
-error_string= range_str;
+error_string= partition_keywords[PKW_RANGE].str;
else
-error_string= list_str;
+error_string= partition_keywords[PKW_LIST].str;
my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string);
goto end;
}
@@ -1771,13 +1776,13 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table)
const char *error_str;
if (part_info->part_type == RANGE_PARTITION)
{
-error_str= range_str;
+error_str= partition_keywords[PKW_RANGE].str;
if (unlikely(check_range_constants(part_info)))
goto end;
}
else if (part_info->part_type == LIST_PARTITION)
{
-error_str= list_str;
+error_str= partition_keywords[PKW_LIST].str;
if (unlikely(check_list_constants(part_info)))
goto end;
}
@@ -1879,7 +1884,7 @@ static int add_part_key_word(File fptr, const char *key_string)
static int add_hash(File fptr)
{
-return add_part_key_word(fptr, hash_str);
+return add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
}
static int add_partition(File fptr)
@@ -1911,7 +1916,7 @@ static int add_key_partition(File fptr, List<char> field_list)
uint i, no_fields;
int err;
List_iterator<char> part_it(field_list);
-err= add_part_key_word(fptr, key_str);
+err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
no_fields= field_list.elements;
i= 0;
do
@@ -1993,7 +1998,7 @@ static int add_partition_values(File fptr, partition_info *part_info,
err+= add_end_parenthesis(fptr);
}
else
-err+= add_string(fptr, "MAXVALUE");
+err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
}
else if (part_info->part_type == LIST_PARTITION)
{
@@ -2081,15 +2086,15 @@ char *generate_partition_syntax(partition_info *part_info,
{
case RANGE_PARTITION:
add_default_info= TRUE;
-err+= add_part_key_word(fptr, range_str);
+err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str);
break;
case LIST_PARTITION:
add_default_info= TRUE;
-err+= add_part_key_word(fptr, list_str);
+err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str);
break;
case HASH_PARTITION:
if (part_info->linear_hash_ind)
-err+= add_string(fptr, "LINEAR ");
+err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
if (part_info->list_of_part_fields)
err+= add_key_partition(fptr, part_info->part_field_list);
else
......
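
The partition_keywords[] array above is kept in the same order as the partition_keywords enum added to ha_partition.h earlier in this diff, so both the keyword text and its length can be looked up by enum value; note the trailing space in the "LINEAR " entry, which lets it be concatenated directly with the HASH or KEY keyword. A minimal illustration of the lookup (the local variable names are not from the patch):

/* Illustration only: PKW_* enum values index partition_keywords[] directly. */
const char *range_kw=  partition_keywords[PKW_RANGE].str;     /* "RANGE" */
uint        range_len= partition_keywords[PKW_RANGE].length;  /* 5 */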
This diff is collapsed.
@@ -332,6 +332,7 @@ enum enum_schema_tables
SCH_ENGINES,
SCH_KEY_COLUMN_USAGE,
SCH_OPEN_TABLES,
SCH_PARTITIONS,
SCH_PLUGINS,
SCH_PROCEDURES,
SCH_SCHEMATA,
......