Kirill Smelkov / mariadb · Commits

Commit be4ebd90
authored Jan 10, 2006 by gluh@eagle.intranet.mysql.r18.ru
WL#2506: Information Schema tables for PARTITIONing
added I_S 'PARTITIONS' table
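For illustration, the kind of query this change enables (a minimal sketch based on the t2 case in the new test file further below; the table, schema and partition names are simply the ones the tests use):

create table t2 (a int not null, b int not null, c int not null, primary key (a,b))
partition by range (a)
(partition x1 values less than (5),
 partition x2 values less than (10),
 partition x3 values less than maxvalue);

select partition_name, partition_ordinal_position, partition_method,
       partition_expression, partition_description, table_rows
from information_schema.partitions
where table_schema="test" and table_name="t2";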
parent 12fcc105

Showing 16 changed files with 640 additions and 17 deletions
mysql-test/r/information_schema.result        +10  -2
mysql-test/r/information_schema_db.result     +1   -0
mysql-test/r/information_schema_part.result   +113 -0
mysql-test/r/ndb_partition_range.result       +5   -0
mysql-test/t/information_schema_part.test     +101 -0
mysql-test/t/ndb_partition_range.test         +3   -0
sql/ha_ndbcluster.cc                          +14  -0
sql/ha_ndbcluster.h                           +1   -0
sql/ha_partition.cc                           +23  -0
sql/ha_partition.h                            +7   -0
sql/handler.cc                                +20  -0
sql/handler.h                                 +15  -0
sql/mysql_priv.h                              +1   -0
sql/sql_partition.cc                          +19  -14
sql/sql_show.cc                               +306 -1
sql/table.h                                   +1   -0
mysql-test/r/information_schema.result
@@ -43,6 +43,7 @@ COLUMNS
COLUMN_PRIVILEGES
ENGINES
KEY_COLUMN_USAGE
PARTITIONS
PLUGINS
ROUTINES
SCHEMATA
@@ -725,7 +726,7 @@ CREATE TABLE t_crashme ( f1 BIGINT);
CREATE VIEW a1 (t_CRASHME) AS SELECT f1 FROM t_crashme GROUP BY f1;
CREATE VIEW a2 AS SELECT t_CRASHME FROM a1;
count(*)
104
105
drop view a2, a1;
drop table t_crashme;
select table_schema,table_name, column_name from
@@ -733,6 +734,9 @@ information_schema.columns
where data_type = 'longtext';
table_schema table_name column_name
information_schema COLUMNS COLUMN_TYPE
information_schema PARTITIONS PARTITION_EXPRESSION
information_schema PARTITIONS SUBPARTITION_EXPRESSION
information_schema PARTITIONS PARTITION_DESCRIPTION
information_schema PLUGINS PLUGIN_DESCRIPTION
information_schema ROUTINES ROUTINE_DEFINITION
information_schema ROUTINES SQL_MODE
@@ -744,6 +748,9 @@ information_schema VIEWS VIEW_DEFINITION
select table_name, column_name, data_type from information_schema.columns
where data_type = 'datetime';
table_name column_name data_type
PARTITIONS CREATE_TIME datetime
PARTITIONS UPDATE_TIME datetime
PARTITIONS CHECK_TIME datetime
ROUTINES CREATED datetime
ROUTINES LAST_ALTERED datetime
TABLES CREATE_TIME datetime
@@ -786,6 +793,7 @@ TABLE_NAME COLUMN_NAME PRIVILEGES
COLUMNS TABLE_NAME select
COLUMN_PRIVILEGES TABLE_NAME select
KEY_COLUMN_USAGE TABLE_NAME select
PARTITIONS TABLE_NAME select
STATISTICS TABLE_NAME select
TABLES TABLE_NAME select
TABLE_CONSTRAINTS TABLE_NAME select
@@ -796,7 +804,7 @@ delete from mysql.db where user='mysqltest_4';
flush privileges;
SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHEMA;
table_schema count(*)
information_schema 18
information_schema 19
mysql 18
create table t1 (i int, j int);
create trigger trg1 before insert on t1 for each row
mysql-test/r/information_schema_db.result
@@ -8,6 +8,7 @@ COLUMNS
COLUMN_PRIVILEGES
ENGINES
KEY_COLUMN_USAGE
PARTITIONS
PLUGINS
ROUTINES
SCHEMATA
mysql-test/r/information_schema_part.result
0 → 100644
drop table if exists t1,t2,t3,t4;
create table t1 (a int not null,b int not null,c int not null, primary key(a,b))
partition by list (b*a)
(partition x1 values in (1) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 NULL 1 NULL LIST NULL b*a NULL 1 0 0 0 # 1024 0 # # NULL NULL default 0 ts1
NULL test t1 x2 NULL 2 NULL LIST NULL b*a NULL 3,11,5,7 0 0 0 # 1024 0 # # NULL NULL default 0 ts2
NULL test t1 x3 NULL 3 NULL LIST NULL b*a NULL 16,8,24,27 0 0 0 # 1024 0 # # NULL NULL default 0 ts3
create table t2 (a int not null,b int not null,c int not null, primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);
select * from information_schema.partitions where table_schema="test"
and table_name="t2";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t2 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 1024 0 # # NULL NULL default 0 ts1
NULL test t2 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 1024 0 # # NULL NULL default 0 ts2
NULL test t2 x3 NULL 3 NULL RANGE NULL a NULL MAXVALUE 0 0 0 # 1024 0 # # NULL NULL default 0 ts3
create table t3 (f1 date)
partition by hash(month(f1))
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t3";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t3 p0 NULL 1 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t3 p1 NULL 2 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t3 p2 NULL 3 NULL HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
create table t4 (f1 date, f2 int)
partition by key(f1,f2)
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t4";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t4 p0 NULL 1 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t4 p1 NULL 2 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t4 p2 NULL 3 NULL KEY NULL f1,f2 NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
drop table t1,t2,t3,t4;
create table t1 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
create table t2 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by key (a)
( partition x1 values less than (1)
( subpartition x11 tablespace t1,
subpartition x12 tablespace t2),
partition x2 values less than (5)
( subpartition x21 tablespace t1,
subpartition x22 tablespace t2)
);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 x11 1 1 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x1 x12 1 2 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t2
NULL test t1 x2 x21 2 1 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x2 x22 2 2 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t2
NULL test t2 x1 x11 1 1 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t2 x1 x12 1 2 RANGE KEY a a 1 0 0 0 # 1024 0 # # NULL NULL default 0 t2
NULL test t2 x2 x21 2 1 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t2 x2 x22 2 2 RANGE KEY a a 5 0 0 0 # 1024 0 # # NULL NULL default 0 t2
drop table t1,t2;
create table t1 (
a int not null,
b int not null,
c int not null,
primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
( subpartition x11 tablespace t1 nodegroup 0,
subpartition x12 tablespace t2 nodegroup 1),
partition x2 values less than (5)
( subpartition x21 tablespace t1 nodegroup 0,
subpartition x22 tablespace t2 nodegroup 1)
);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 x11 1 1 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x1 x12 1 2 RANGE HASH a a+b 1 0 0 0 # 1024 0 # # NULL NULL default 1 t2
NULL test t1 x2 x21 2 1 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 0 t1
NULL test t1 x2 x22 2 2 RANGE HASH a a+b 5 0 0 0 # 1024 0 # # NULL NULL default 1 t2
show tables;
Tables_in_test
t1
drop table t1;
create table t1(f1 int, f2 int);
select * from information_schema.partitions where table_schema="test";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 NULL NULL NULL NULL NULL NULL NULL NULL NULL 0 0 0 # 1024 0 # # NULL NULL 0
drop table t1;
create table t1 (f1 date)
partition by linear hash(month(f1))
partitions 3;
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 p0 NULL 1 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t1 p1 NULL 2 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
NULL test t1 p2 NULL 3 NULL LINEAR HASH NULL month(f1) NULL NULL 0 0 0 # 1024 0 # # NULL NULL default 0 default
drop table t1;
mysql-test/r/ndb_partition_range.result
@@ -15,6 +15,11 @@ INSERT into t1 values (1, 1, 1);
INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);
select * from information_schema.partitions where table_name= 't1';
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME PARTITION_NAME SUBPARTITION_NAME PARTITION_ORDINAL_POSITION SUBPARTITION_ORDINAL_POSITION PARTITION_METHOD SUBPARTITION_METHOD PARTITION_EXPRESSION SUBPARTITION_EXPRESSION PARTITION_DESCRIPTION TABLE_ROWS AVG_ROW_LENGTH DATA_LENGTH MAX_DATA_LENGTH INDEX_LENGTH DATA_FREE CREATE_TIME UPDATE_TIME CHECK_TIME CHECKSUM PARTITION_COMMENT NODEGROUP TABLESPACE_NAME
NULL test t1 x1 NULL 1 NULL RANGE NULL a NULL 5 0 0 0 # 0 0 # # NULL NULL default 0 default
NULL test t1 x2 NULL 2 NULL RANGE NULL a NULL 10 0 0 0 # 0 0 # # NULL NULL default 0 default
NULL test t1 x3 NULL 3 NULL RANGE NULL a NULL 20 0 0 0 # 0 0 # # NULL NULL default 0 default
select * from t1 order by a;
a b c
1 1 1
mysql-test/t/information_schema_part.test
0 → 100644
-- source include/have_partition.inc
--disable_warnings
drop table if exists t1,t2,t3,t4;
--enable_warnings

create table t1 (a int not null,b int not null,c int not null, primary key(a,b))
partition by list (b*a)
(partition x1 values in (1) tablespace ts1,
partition x2 values in (3, 11, 5, 7) tablespace ts2,
partition x3 values in (16, 8, 5+19, 70-43) tablespace ts3);

--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t1";

create table t2 (a int not null,b int not null,c int not null, primary key(a,b))
partition by range (a)
partitions 3
(partition x1 values less than (5) tablespace ts1,
partition x2 values less than (10) tablespace ts2,
partition x3 values less than maxvalue tablespace ts3);

--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t2";

create table t3 (f1 date)
partition by hash(month(f1))
partitions 3;

--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t3";

create table t4 (f1 date, f2 int)
partition by key(f1,f2)
partitions 3;

--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t4";

drop table t1,t2,t3,t4;

create table t1 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
  ( subpartition x11 tablespace t1,
    subpartition x12 tablespace t2),
  partition x2 values less than (5)
  ( subpartition x21 tablespace t1,
    subpartition x22 tablespace t2)
);

create table t2 (a int not null,b int not null,c int not null,primary key (a,b))
partition by range (a)
subpartition by key (a)
( partition x1 values less than (1)
  ( subpartition x11 tablespace t1,
    subpartition x12 tablespace t2),
  partition x2 values less than (5)
  ( subpartition x21 tablespace t1,
    subpartition x22 tablespace t2)
);

--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test";

drop table t1,t2;

create table t1 (
  a int not null,
  b int not null,
  c int not null,
  primary key (a,b))
partition by range (a)
subpartition by hash (a+b)
( partition x1 values less than (1)
  ( subpartition x11 tablespace t1 nodegroup 0,
    subpartition x12 tablespace t2 nodegroup 1),
  partition x2 values less than (5)
  ( subpartition x21 tablespace t1 nodegroup 0,
    subpartition x22 tablespace t2 nodegroup 1)
);

--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test";
show tables;
drop table t1;

create table t1(f1 int, f2 int);
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test";
drop table t1;

create table t1 (f1 date)
partition by linear hash(month(f1))
partitions 3;
--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_schema="test"
and table_name="t1";
drop table t1;
mysql-test/t/ndb_partition_range.test
@@ -32,6 +32,9 @@ INSERT into t1 values (6, 1, 1);
INSERT into t1 values (10, 1, 1);
INSERT into t1 values (15, 1, 1);

--replace_column 16 # 19 # 20 #
select * from information_schema.partitions where table_name= 't1';

select * from t1 order by a;
select * from t1 where a=1 order by a;
sql/ha_ndbcluster.cc
@@ -3122,6 +3122,20 @@ void ha_ndbcluster::info(uint flag)
  DBUG_VOID_RETURN;
}

void ha_ndbcluster::get_dynamic_partition_info(PARTITION_INFO *stat_info,
                                               uint part_id)
{
  /*
    This functions should be fixed. Suggested fix: to
    implement ndb function which retrives the statistics
    about ndb partitions.
  */
  bzero((char*) stat_info, sizeof(PARTITION_INFO));
  return;
}

int ha_ndbcluster::extra(enum ha_extra_function operation)
{
  DBUG_ENTER("extra");
sql/ha_ndbcluster.h
@@ -522,6 +522,7 @@ class ha_ndbcluster: public handler
  bool get_error_message(int error, String *buf);
  void info(uint);
  void get_dynamic_partition_info(PARTITION_INFO *stat_info, uint part_id);
  int extra(enum ha_extra_function operation);
  int extra_opt(enum ha_extra_function operation, ulong cache_size);
  int external_lock(THD *thd, int lock_type);
sql/ha_partition.cc
@@ -2444,6 +2444,29 @@ void ha_partition::info(uint flag)
}

void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info,
                                              uint part_id)
{
  handler *file= m_file[part_id];
  file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
             HA_STATUS_NO_LOCK);
  stat_info->records= file->records;
  stat_info->mean_rec_length= file->mean_rec_length;
  stat_info->data_file_length= file->data_file_length;
  stat_info->max_data_file_length= file->max_data_file_length;
  stat_info->index_file_length= file->index_file_length;
  stat_info->delete_length= file->delete_length;
  stat_info->create_time= file->create_time;
  stat_info->update_time= file->update_time;
  stat_info->check_time= file->check_time;
  stat_info->check_sum= 0;
  if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
    stat_info->check_sum= file->checksum();
  return;
}

/*
  extra() is called whenever the server wishes to send a hint to
  the storage engine. The MyISAM engine implements the most hints.
sql/ha_partition.h
@@ -18,6 +18,11 @@
#pragma interface                       /* gcc class implementation */
#endif

enum partition_keywords
{
  PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR
};

/*
  PARTITION_SHARE is a structure that will be shared amoung all open handlers
  The partition implements the minimum of what you will probably need.

@@ -408,6 +413,8 @@ public:
  -------------------------------------------------------------------------
  */
  virtual void info(uint);
  void get_dynamic_partition_info(PARTITION_INFO *stat_info,
                                  uint part_id);
  virtual int extra(enum ha_extra_function operation);
  virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
  virtual int reset(void);
sql/handler.cc
@@ -2096,6 +2096,26 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
}

void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info,
                                         uint part_id)
{
  info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
       HA_STATUS_NO_LOCK);
  stat_info->records= records;
  stat_info->mean_rec_length= mean_rec_length;
  stat_info->data_file_length= data_file_length;
  stat_info->max_data_file_length= max_data_file_length;
  stat_info->index_file_length= index_file_length;
  stat_info->delete_length= delete_length;
  stat_info->create_time= create_time;
  stat_info->update_time= update_time;
  stat_info->check_time= check_time;
  stat_info->check_sum= 0;
  if (table_flags() & (ulong) HA_HAS_CHECKSUM)
    stat_info->check_sum= checksum();
  return;
}

/****************************************************************************
** Some general functions that isn't in the handler class
****************************************************************************/
sql/handler.h
@@ -494,6 +494,19 @@ enum partition_state {
  PART_ADDED= 6
};

typedef struct {
  ulonglong data_file_length;
  ulonglong max_data_file_length;
  ulonglong index_file_length;
  ulonglong delete_length;
  ha_rows records;
  ulong mean_rec_length;
  time_t create_time;
  time_t check_time;
  time_t update_time;
  ulonglong check_sum;
} PARTITION_INFO;

#define UNDEF_NODEGROUP 65535
class Item;

@@ -1229,6 +1242,8 @@ public:
  { return (ha_rows) 10; }
  virtual void position(const byte *record)=0;
  virtual void info(uint)=0; // see my_base.h for full description
  virtual void get_dynamic_partition_info(PARTITION_INFO *stat_info,
                                          uint part_id);
  virtual int extra(enum ha_extra_function operation)
  { return 0; }
  virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
sql/mysql_priv.h
@@ -889,6 +889,7 @@ void free_status_vars();
/* information schema */
extern LEX_STRING information_schema_name;
const extern LEX_STRING partition_keywords[];
LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str,
                            const char* str, uint length,
                            bool allocate_lex_string);
sql/sql_partition.cc
@@ -43,13 +43,18 @@
/*
  Partition related functions declarations and some static constants;
*/
static const char *hash_str= "HASH";
static const char *range_str= "RANGE";
static const char *list_str= "LIST";
const LEX_STRING partition_keywords[]=
{
  { (char *) STRING_WITH_LEN("HASH") },
  { (char *) STRING_WITH_LEN("RANGE") },
  { (char *) STRING_WITH_LEN("LIST") },
  { (char *) STRING_WITH_LEN("KEY") },
  { (char *) STRING_WITH_LEN("MAXVALUE") },
  { (char *) STRING_WITH_LEN("LINEAR ") }
};
static const char *part_str= "PARTITION";
static const char *sub_str= "SUB";
static const char *by_str= "BY";
static const char *key_str= "KEY";
static const char *space_str= " ";
static const char *equal_str= "=";
static const char *end_paren_str= ")";

@@ -629,9 +634,9 @@ static bool set_up_default_partitions(partition_info *part_info,
  {
    const char *error_string;
    if (part_info->part_type == RANGE_PARTITION)
      error_string= range_str;
      error_string= partition_keywords[PKW_RANGE].str;
    else
      error_string= list_str;
      error_string= partition_keywords[PKW_LIST].str;
    my_error(ER_PARTITIONS_MUST_BE_DEFINED_ERROR, MYF(0), error_string);
    goto end;
  }

@@ -1771,13 +1776,13 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table)
    const char *error_str;
    if (part_info->part_type == RANGE_PARTITION)
    {
      error_str= range_str;
      error_str= partition_keywords[PKW_RANGE].str;
      if (unlikely(check_range_constants(part_info)))
        goto end;
    }
    else if (part_info->part_type == LIST_PARTITION)
    {
      error_str= list_str;
      error_str= partition_keywords[PKW_LIST].str;
      if (unlikely(check_list_constants(part_info)))
        goto end;
    }

@@ -1879,7 +1884,7 @@ static int add_part_key_word(File fptr, const char *key_string)
static int add_hash(File fptr)
{
  return add_part_key_word(fptr, hash_str);
  return add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
}

static int add_partition(File fptr)

@@ -1911,7 +1916,7 @@ static int add_key_partition(File fptr, List<char> field_list)
  uint i, no_fields;
  int err;
  List_iterator<char> part_it(field_list);
  err= add_part_key_word(fptr, key_str);
  err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
  no_fields= field_list.elements;
  i= 0;
  do

@@ -1993,7 +1998,7 @@ static int add_partition_values(File fptr, partition_info *part_info,
      err+= add_end_parenthesis(fptr);
    }
    else
      err+= add_string(fptr, "MAXVALUE");
      err+= add_string(fptr, partition_keywords[PKW_MAXVALUE].str);
  }
  else if (part_info->part_type == LIST_PARTITION)
  {

@@ -2081,15 +2086,15 @@ char *generate_partition_syntax(partition_info *part_info,
  {
    case RANGE_PARTITION:
      add_default_info= TRUE;
      err+= add_part_key_word(fptr, range_str);
      err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str);
      break;
    case LIST_PARTITION:
      add_default_info= TRUE;
      err+= add_part_key_word(fptr, list_str);
      err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str);
      break;
    case HASH_PARTITION:
      if (part_info->linear_hash_ind)
        err+= add_string(fptr, "LINEAR ");
        err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
      if (part_info->list_of_part_fields)
        err+= add_key_partition(fptr, part_info->part_field_list);
      else
sql/sql_show.cc
@@ -27,6 +27,9 @@
#include "authors.h"
#include <my_dir.h>
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
#endif

static const char *grant_names[]={
  "select","insert","update","delete","create","drop","reload","shutdown",

@@ -3487,6 +3490,275 @@ static int get_schema_key_column_usage_record(THD *thd,
}


static void collect_partition_expr(List<char> &field_list, String *str)
{
  List_iterator<char> part_it(field_list);
  ulong no_fields= field_list.elements;
  const char *field_str;
  str->length(0);
  while ((field_str= part_it++))
  {
    str->append(field_str);
    if (--no_fields != 0)
      str->append(",");
  }
  return;
}


static void store_schema_partitions_record(THD *thd, TABLE *table,
                                           partition_element *part_elem,
                                           handler *file, uint part_id)
{
  CHARSET_INFO *cs= system_charset_info;
  PARTITION_INFO stat_info;
  TIME time;
  file->get_dynamic_partition_info(&stat_info, part_id);
  table->field[12]->store((longlong) stat_info.records, TRUE);
  table->field[13]->store((longlong) stat_info.mean_rec_length, TRUE);
  table->field[14]->store((longlong) stat_info.data_file_length, TRUE);
  if (stat_info.max_data_file_length)
  {
    table->field[15]->store((longlong) stat_info.max_data_file_length, TRUE);
    table->field[15]->set_notnull();
  }
  table->field[16]->store((longlong) stat_info.index_file_length, TRUE);
  table->field[17]->store((longlong) stat_info.delete_length, TRUE);
  if (stat_info.create_time)
  {
    thd->variables.time_zone->gmt_sec_to_TIME(&time,
                                              stat_info.create_time);
    table->field[18]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
    table->field[18]->set_notnull();
  }
  if (stat_info.update_time)
  {
    thd->variables.time_zone->gmt_sec_to_TIME(&time,
                                              stat_info.update_time);
    table->field[19]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
    table->field[19]->set_notnull();
  }
  if (stat_info.check_time)
  {
    thd->variables.time_zone->gmt_sec_to_TIME(&time, stat_info.check_time);
    table->field[20]->store_time(&time, MYSQL_TIMESTAMP_DATETIME);
    table->field[20]->set_notnull();
  }
  if (file->table_flags() & (ulong) HA_HAS_CHECKSUM)
  {
    table->field[21]->store((longlong) stat_info.check_sum, TRUE);
    table->field[21]->set_notnull();
  }
  if (part_elem)
  {
    if (part_elem->part_comment)
      table->field[22]->store(part_elem->part_comment,
                              strlen(part_elem->part_comment), cs);
    else
      table->field[22]->store(STRING_WITH_LEN("default"), cs);
    if (part_elem->nodegroup_id != UNDEF_NODEGROUP)
      table->field[23]->store((longlong) part_elem->nodegroup_id, TRUE);
    else
      table->field[23]->store(STRING_WITH_LEN("default"), cs);
    if (part_elem->tablespace_name)
      table->field[24]->store(part_elem->tablespace_name,
                              strlen(part_elem->tablespace_name), cs);
    else
      table->field[24]->store(STRING_WITH_LEN("default"), cs);
  }
  return;
}


static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
                                        TABLE *table, bool res,
                                        const char *base_name,
                                        const char *file_name)
{
  CHARSET_INFO *cs= system_charset_info;
  char buff[61];
  String tmp_res(buff, sizeof(buff), cs);
  String tmp_str;
  TIME time;
  TABLE *show_table= tables->table;
  handler *file= show_table->file;
  partition_info *part_info= show_table->part_info;
  DBUG_ENTER("get_schema_partitions_record");

  if (res)
  {
    if (part_info)
      push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
                   thd->net.last_errno, thd->net.last_error);
    thd->clear_error();
    DBUG_RETURN(0);
  }

  if (part_info)
  {
    partition_element *part_elem;
    List_iterator<partition_element> part_it(part_info->partitions);
    uint part_pos= 0, part_id= 0;
    uint no_parts= part_info->no_parts;
    handler *part_file;

    restore_record(table, s->default_values);
    table->field[1]->store(base_name, strlen(base_name), cs);
    table->field[2]->store(file_name, strlen(file_name), cs);

    /* Partition method */
    switch (part_info->part_type) {
    case RANGE_PARTITION:
      table->field[7]->store(partition_keywords[PKW_RANGE].str,
                             partition_keywords[PKW_RANGE].length, cs);
      break;
    case LIST_PARTITION:
      table->field[7]->store(partition_keywords[PKW_LIST].str,
                             partition_keywords[PKW_LIST].length, cs);
      break;
    case HASH_PARTITION:
      tmp_res.length(0);
      if (part_info->linear_hash_ind)
        tmp_res.append(partition_keywords[PKW_LINEAR].str,
                       partition_keywords[PKW_LINEAR].length);
      if (part_info->list_of_part_fields)
        tmp_res.append(partition_keywords[PKW_KEY].str,
                       partition_keywords[PKW_KEY].length);
      else
        tmp_res.append(partition_keywords[PKW_HASH].str,
                       partition_keywords[PKW_HASH].length);
      table->field[7]->store(tmp_res.ptr(), tmp_res.length(), cs);
      break;
    default:
      DBUG_ASSERT(0);
      current_thd->fatal_error();
      DBUG_RETURN(1);
    }
    table->field[7]->set_notnull();

    /* Partition expression */
    if (part_info->part_expr)
    {
      table->field[9]->store(part_info->part_func_string,
                             part_info->part_func_len, cs);
      table->field[9]->set_notnull();
    }
    else if (part_info->list_of_part_fields)
    {
      collect_partition_expr(part_info->part_field_list, &tmp_str);
      table->field[9]->store(tmp_str.ptr(), tmp_str.length(), cs);
      table->field[9]->set_notnull();
    }

    if (is_sub_partitioned(part_info))
    {
      /* Subpartition method */
      if (part_info->list_of_subpart_fields)
        table->field[8]->store(partition_keywords[PKW_KEY].str,
                               partition_keywords[PKW_KEY].length, cs);
      else
        table->field[8]->store(partition_keywords[PKW_HASH].str,
                               partition_keywords[PKW_HASH].length, cs);
      table->field[8]->set_notnull();

      /* Subpartition expression */
      if (part_info->subpart_expr)
      {
        table->field[10]->store(part_info->subpart_func_string,
                                part_info->subpart_func_len, cs);
        table->field[10]->set_notnull();
      }
      else if (part_info->list_of_subpart_fields)
      {
        collect_partition_expr(part_info->subpart_field_list, &tmp_str);
        table->field[10]->store(tmp_str.ptr(), tmp_str.length(), cs);
        table->field[10]->set_notnull();
      }
    }

    while ((part_elem= part_it++))
    {
      table->field[3]->store(part_elem->partition_name,
                             strlen(part_elem->partition_name), cs);
      table->field[3]->set_notnull();
      /* PARTITION_ORDINAL_POSITION */
      table->field[5]->store((longlong) ++part_pos, TRUE);
      table->field[5]->set_notnull();

      /* Partition description */
      if (part_info->part_type == RANGE_PARTITION)
      {
        if (part_elem->range_value != LONGLONG_MAX)
          table->field[11]->store((longlong) part_elem->range_value, FALSE);
        else
          table->field[11]->store(partition_keywords[PKW_MAXVALUE].str,
                                  partition_keywords[PKW_MAXVALUE].length, cs);
        table->field[11]->set_notnull();
      }
      else if (part_info->part_type == LIST_PARTITION)
      {
        List_iterator<longlong> list_val_it(part_elem->list_val_list);
        longlong *list_value;
        uint no_items= part_elem->list_val_list.elements;
        tmp_str.length(0);
        tmp_res.length(0);
        while ((list_value= list_val_it++))
        {
          tmp_res.set(*list_value, cs);
          tmp_str.append(tmp_res);
          if (--no_items != 0)
            tmp_str.append(",");
        };
        table->field[11]->store(tmp_str.ptr(), tmp_str.length(), cs);
        table->field[11]->set_notnull();
      }

      if (part_elem->subpartitions.elements)
      {
        List_iterator<partition_element> sub_it(part_elem->subpartitions);
        partition_element *subpart_elem;
        uint subpart_pos= 0;

        while ((subpart_elem= sub_it++))
        {
          table->field[4]->store(subpart_elem->partition_name,
                                 strlen(subpart_elem->partition_name), cs);
          table->field[4]->set_notnull();
          /* SUBPARTITION_ORDINAL_POSITION */
          table->field[6]->store((longlong) ++subpart_pos, TRUE);
          table->field[6]->set_notnull();
          store_schema_partitions_record(thd, table, subpart_elem,
                                         file, part_id);
          part_id++;
          if (schema_table_store_record(thd, table))
            DBUG_RETURN(1);
        }
      }
      else
      {
        store_schema_partitions_record(thd, table, part_elem, file, part_id);
        part_id++;
        if (schema_table_store_record(thd, table))
          DBUG_RETURN(1);
      }
    }
    DBUG_RETURN(0);
  }
  else
  {
    store_schema_partitions_record(thd, table, 0, file, 0);
    if (schema_table_store_record(thd, table))
      DBUG_RETURN(1);
  }
  DBUG_RETURN(0);
}


int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond)
{
  DBUG_ENTER("fill_open_tables");

@@ -4297,6 +4569,37 @@ ST_FIELD_INFO triggers_fields_info[]=
};

ST_FIELD_INFO partitions_fields_info[]=
{
  {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0},
  {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
  {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
  {"PARTITION_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
  {"SUBPARTITION_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0},
  {"PARTITION_ORDINAL_POSITION", 21, MYSQL_TYPE_LONG, 0, 1, 0},
  {"SUBPARTITION_ORDINAL_POSITION", 21, MYSQL_TYPE_LONG, 0, 1, 0},
  {"PARTITION_METHOD", 12, MYSQL_TYPE_STRING, 0, 1, 0},
  {"SUBPARTITION_METHOD", 5, MYSQL_TYPE_STRING, 0, 1, 0},
  {"PARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
  {"SUBPARTITION_EXPRESSION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
  {"PARTITION_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
  {"TABLE_ROWS", 21, MYSQL_TYPE_LONG, 0, 0, 0},
  {"AVG_ROW_LENGTH", 21, MYSQL_TYPE_LONG, 0, 0, 0},
  {"DATA_LENGTH", 21, MYSQL_TYPE_LONG, 0, 0, 0},
  {"MAX_DATA_LENGTH", 21, MYSQL_TYPE_LONG, 0, 1, 0},
  {"INDEX_LENGTH", 21, MYSQL_TYPE_LONG, 0, 0, 0},
  {"DATA_FREE", 21, MYSQL_TYPE_LONG, 0, 0, 0},
  {"CREATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
  {"UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
  {"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, 0},
  {"CHECKSUM", 21, MYSQL_TYPE_LONG, 0, 1, 0},
  {"PARTITION_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, 0},
  {"NODEGROUP", 21, MYSQL_TYPE_LONG, 0, 0, 0},
  {"TABLESPACE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0},
  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
};

ST_FIELD_INFO variables_fields_info[]=
{
  {"Variable_name", 80, MYSQL_TYPE_STRING, 0, 0, "Variable_name"},

@@ -4345,6 +4648,8 @@ ST_SCHEMA_TABLE schema_tables[]=
   get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0},
  {"OPEN_TABLES", open_tables_fields_info, create_schema_table,
   fill_open_tables, make_old_format, 0, -1, -1, 1},
  {"PARTITIONS", partitions_fields_info, create_schema_table,
   get_all_tables, 0, get_schema_partitions_record, 1, 2, 0},
  {"PLUGINS", plugin_fields_info, create_schema_table,
   fill_plugins, make_old_format, 0, -1, -1, 0},
  {"ROUTINES", proc_fields_info, create_schema_table,
sql/table.h
@@ -332,6 +332,7 @@ enum enum_schema_tables
  SCH_ENGINES,
  SCH_KEY_COLUMN_USAGE,
  SCH_OPEN_TABLES,
  SCH_PARTITIONS,
  SCH_PLUGINS,
  SCH_PROCEDURES,
  SCH_SCHEMATA,
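As a quick cross-check of the column set registered in partitions_fields_info above, the new table's own metadata can be read back through INFORMATION_SCHEMA; an illustrative query in the spirit of the information_schema.columns checks in the updated result file:

select column_name, data_type
from information_schema.columns
where table_schema="information_schema" and table_name="PARTITIONS"
order by ordinal_position;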