Commit 029f16a1 authored by unknown

Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new

into  poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-new


sql/ha_ndbcluster_binlog.cc:
  Auto merged
sql/sql_table.cc:
  Auto merged
parents de7ac129 46fc7653
...@@ -28,3 +28,11 @@ partitions 3
partition x2 values less than (10),
partition x3 values less than (20));
drop table t1;
CREATE TABLE t1 (id INT) ENGINE=NDB
PARTITION BY LIST(id)
(PARTITION p0 VALUES IN (2, 4),
PARTITION p1 VALUES IN (42, 142));
INSERT INTO t1 VALUES (2);
UPDATE t1 SET id=5 WHERE id=2;
ERROR HY000: Table has no partition for value 5
DROP TABLE t1;
...@@ -117,3 +117,107 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `a` (`a`)
) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (b) (PARTITION x1 VALUES LESS THAN (5) ENGINE = ndbcluster, PARTITION x2 VALUES LESS THAN (10) ENGINE = ndbcluster, PARTITION x3 VALUES LESS THAN (20) ENGINE = ndbcluster)
drop table t1;
CREATE TABLE t1
(id MEDIUMINT NOT NULL,
b1 BIT(8),
vc VARCHAR(255),
bc CHAR(255),
d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0,
total BIGINT UNSIGNED,
y YEAR,
t DATE) ENGINE=NDB
PARTITION BY RANGE (YEAR(t))
(PARTITION p0 VALUES LESS THAN (1901),
PARTITION p1 VALUES LESS THAN (1946),
PARTITION p2 VALUES LESS THAN (1966),
PARTITION p3 VALUES LESS THAN (1986),
PARTITION p4 VALUES LESS THAN (2005),
PARTITION p5 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
SELECT * FROM t1;
id b1 vc bc d f total y t
0 NULL NULL NULL NULL NULL NULL NULL NULL
ALTER TABLE t1 ENGINE=MYISAM;
SELECT * FROM t1;
id b1 vc bc d f total y t
0 NULL NULL NULL NULL NULL NULL NULL NULL
DROP TABLE t1;
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE=1M
ENGINE=NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE NDB;
CREATE TABLE test.t1 (
a1 INT,
a2 TEXT NOT NULL,
a3 BIT NOT NULL,
a4 DECIMAL(8,3),
a5 INT NOT NULL,
a6 VARCHAR(255),
PRIMARY KEY(a1))
TABLESPACE ts1 STORAGE DISK ENGINE=NDB
PARTITION BY LIST (a1)
(PARTITION p0 VALUES IN (1,2,3,4,5),
PARTITION p1 VALUES IN (6,7,8,9, 10),
PARTITION p2 VALUES IN (11, 12, 13, 14, 15));
SELECT COUNT(*) FROM test.t1;
COUNT(*)
15
ALTER TABLE test.t1 DROP COLUMN a4;
SELECT COUNT(*) FROM test.t1;
COUNT(*)
15
DROP TABLE t1;
CREATE TABLE test.t1 (
a1 INT,
a2 TEXT NOT NULL,
a3 BIT NOT NULL,
a4 DECIMAL(8,3),
a5 INT NOT NULL,
a6 VARCHAR(255),
PRIMARY KEY(a1))
TABLESPACE ts1 STORAGE DISK ENGINE=NDB
PARTITION BY HASH(a1)
PARTITIONS 4;
SELECT COUNT(*) FROM test.t1;
COUNT(*)
15
ALTER TABLE test.t1 DROP COLUMN a4;
SELECT COUNT(*) FROM test.t1;
COUNT(*)
15
DROP TABLE t1;
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile.dat'
ENGINE=NDB;
DROP TABLESPACE ts1 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
CREATE TABLE t1
(id MEDIUMINT NOT NULL,
b1 BIT(8),
vc VARCHAR(255),
bc CHAR(255),
d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0,
total BIGINT UNSIGNED,
y YEAR,
t DATE) ENGINE=NDB
PARTITION BY LIST(id)
(PARTITION p0 VALUES IN (2, 4),
PARTITION p1 VALUES IN (42, 142));
INSERT INTO t1 VALUES (2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
SELECT * FROM t1;
id b1 vc bc d f total y t
2 NULL NULL NULL NULL NULL NULL NULL NULL
ALTER TABLE t1 ADD PARTITION
(PARTITION p2 VALUES IN (412));
SELECT * FROM t1;
id b1 vc bc d f total y t
2 NULL NULL NULL NULL NULL NULL NULL NULL
DROP TABLE t1;
...@@ -44,3 +44,15 @@ partitions 3
partition x3 values less than (20));
drop table t1;
#
# Bug #17763 mysqld cores with list partitioning if update to missing partition
#
CREATE TABLE t1 (id INT) ENGINE=NDB
PARTITION BY LIST(id)
(PARTITION p0 VALUES IN (2, 4),
PARTITION p1 VALUES IN (42, 142));
INSERT INTO t1 VALUES (2);
--error ER_NO_PARTITION_FOR_GIVEN_VALUE
UPDATE t1 SET id=5 WHERE id=2;
DROP TABLE t1;
...@@ -93,3 +93,133 @@ show create table t1;
drop table t1;
#
# Bug #17499, #17687
# Alter partitioned NDB table causes mysqld to core
#
CREATE TABLE t1
(id MEDIUMINT NOT NULL,
b1 BIT(8),
vc VARCHAR(255),
bc CHAR(255),
d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0,
total BIGINT UNSIGNED,
y YEAR,
t DATE) ENGINE=NDB
PARTITION BY RANGE (YEAR(t))
(PARTITION p0 VALUES LESS THAN (1901),
PARTITION p1 VALUES LESS THAN (1946),
PARTITION p2 VALUES LESS THAN (1966),
PARTITION p3 VALUES LESS THAN (1986),
PARTITION p4 VALUES LESS THAN (2005),
PARTITION p5 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
SELECT * FROM t1;
ALTER TABLE t1 ENGINE=MYISAM;
SELECT * FROM t1;
DROP TABLE t1;
CREATE LOGFILE GROUP lg1
ADD UNDOFILE 'undofile.dat'
INITIAL_SIZE 16M
UNDO_BUFFER_SIZE=1M
ENGINE=NDB;
CREATE TABLESPACE ts1
ADD DATAFILE 'datafile.dat'
USE LOGFILE GROUP lg1
INITIAL_SIZE 12M
ENGINE NDB;
CREATE TABLE test.t1 (
a1 INT,
a2 TEXT NOT NULL,
a3 BIT NOT NULL,
a4 DECIMAL(8,3),
a5 INT NOT NULL,
a6 VARCHAR(255),
PRIMARY KEY(a1))
TABLESPACE ts1 STORAGE DISK ENGINE=NDB
PARTITION BY LIST (a1)
(PARTITION p0 VALUES IN (1,2,3,4,5),
PARTITION p1 VALUES IN (6,7,8,9, 10),
PARTITION p2 VALUES IN (11, 12, 13, 14, 15));
let $j= 15;
--disable_query_log
while ($j)
{
eval INSERT INTO test.t1 VALUES ($j, "Tested Remotely from Texas, USA",
b'1',$j.00,$j+1,"By NIK $j");
dec $j;
}
--enable_query_log
SELECT COUNT(*) FROM test.t1;
ALTER TABLE test.t1 DROP COLUMN a4;
SELECT COUNT(*) FROM test.t1;
DROP TABLE t1;
CREATE TABLE test.t1 (
a1 INT,
a2 TEXT NOT NULL,
a3 BIT NOT NULL,
a4 DECIMAL(8,3),
a5 INT NOT NULL,
a6 VARCHAR(255),
PRIMARY KEY(a1))
TABLESPACE ts1 STORAGE DISK ENGINE=NDB
PARTITION BY HASH(a1)
PARTITIONS 4;
let $j= 15;
--disable_query_log
while ($j)
{
eval INSERT INTO test.t1 VALUES ($j, "Tested Remotely from Texas, USA",
b'1',$j.00,$j+1,"By NIK $j");
dec $j;
}
--enable_query_log
SELECT COUNT(*) FROM test.t1;
ALTER TABLE test.t1 DROP COLUMN a4;
SELECT COUNT(*) FROM test.t1;
DROP TABLE t1;
ALTER TABLESPACE ts1
DROP DATAFILE 'datafile.dat'
ENGINE=NDB;
DROP TABLESPACE ts1 ENGINE=NDB;
DROP LOGFILE GROUP lg1 ENGINE=NDB;
#
# Bug #17701 ALTER TABLE t1 ADD PARTITION for PARTITION BY LIST hangs test
#
CREATE TABLE t1
(id MEDIUMINT NOT NULL,
b1 BIT(8),
vc VARCHAR(255),
bc CHAR(255),
d DECIMAL(10,4) DEFAULT 0,
f FLOAT DEFAULT 0,
total BIGINT UNSIGNED,
y YEAR,
t DATE) ENGINE=NDB
PARTITION BY LIST(id)
(PARTITION p0 VALUES IN (2, 4),
PARTITION p1 VALUES IN (42, 142));
INSERT INTO t1 VALUES (2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
SELECT * FROM t1;
ALTER TABLE t1 ADD PARTITION
(PARTITION p2 VALUES IN (412));
SELECT * FROM t1;
DROP TABLE t1;
...@@ -5988,8 +5988,11 @@ void ha_ndbcluster::print_error(int error, myf errflag)
  DBUG_PRINT("enter", ("error = %d", error));
  if (error == HA_ERR_NO_PARTITION_FOUND)
+ {
+   char buf[100];
    my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0),
-            (int)m_part_info->part_expr->val_int());
+            llstr(m_part_info->part_expr->val_int(), buf));
+ }
  else
    handler::print_error(error, errflag);
  DBUG_VOID_RETURN;
...
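The fix above replaces an (int) cast with llstr() when formatting the value that had no matching partition: part_expr->val_int() returns a 64-bit longlong, and narrowing it to int can put the wrong number in the error message. A minimal standalone sketch of the difference, assuming only llstr() from the MySQL source tree (m_string.h) and an illustrative value; it is not the server's actual code path:

    #include <my_global.h>   /* MySQL portability header, defines longlong */
    #include <m_string.h>    /* llstr(): format a longlong into a caller buffer */
    #include <stdio.h>

    int main()
    {
      char buf[22];                   /* enough for any 64-bit value plus NUL */
      longlong val= 4294967298LL;     /* hypothetical partition expression value */
      printf("as int:   %d\n", (int)val);          /* typically prints 2 (truncated) */
      printf("as llstr: %s\n", llstr(val, buf));   /* prints 4294967298 */
      return 0;
    }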
...@@ -284,11 +284,11 @@ ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share,
    it may be in use by the injector thread
  */
  share->ndb_value[0]= (NdbValue*)
-   alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
-              + 1 /*extra for hidden key*/);
+   alloc_root(mem_root, sizeof(NdbValue) *
+              (table->s->fields + 2 /*extra for hidden key and part key*/));
  share->ndb_value[1]= (NdbValue*)
-   alloc_root(mem_root, sizeof(NdbValue) * table->s->fields
-              +1 /*extra for hidden key*/);
+   alloc_root(mem_root, sizeof(NdbValue) *
+              (table->s->fields + 2 /*extra for hidden key and part key*/));
  DBUG_RETURN(0);
}
...
...@@ -130,6 +130,34 @@
#define HA_ONLINE_DROP_UNIQUE_INDEX (1L << 9) /*drop uniq. online*/
#define HA_ONLINE_ADD_PK_INDEX (1L << 10)/*add prim. online*/
#define HA_ONLINE_DROP_PK_INDEX (1L << 11)/*drop prim. online*/
/*
  HA_PARTITION_FUNCTION_SUPPORTED indicates that the partitioning function
  is supported at all.
  HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
  exist, but they are not necessarily done online.
  HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
  the new partition and the old partitions when updating through the
  old partitioning scheme while a change of the partitioning is performed.
  This means that updates of the table can be supported while the copy
  phase of the change is running. To allow the change with no locking at
  all, a double write from new to old would also be required; that is not
  implied by this flag.
  This flag was actually removed even before it was first introduced; the
  new idea is that handlers handle the lock level already in store_lock
  for ALTER TABLE of partitions.
  HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
  care of changing the partitions online and in one phase. Thus all phases
  needed to handle the change are implemented inside the storage engine.
  The storage engine must also support auto-discovery, since the frm file
  is changed as part of the change and this change must be controlled by
  the storage engine. A typical engine that supports this is NDB (through
  WL #2498).
*/
#define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
#define HA_FAST_CHANGE_PARTITION (1L << 13)
#define HA_PARTITION_ONE_PHASE (1L << 14)
/*
Index scan will not return records in rowid order. Not guaranteed to be Index scan will not return records in rowid order. Not guaranteed to be
...
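As a rough illustration of how these capability bits are meant to be consumed, here is a standalone sketch (not the server's actual control flow) in which a hypothetical engine advertises all three flags and the caller derives whether a fast ALTER path exists, mirroring the mask test added to prep_alter_part_table() further down:

    #include <stdio.h>

    /* Values copied from the definitions above. */
    #define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
    #define HA_FAST_CHANGE_PARTITION        (1L << 13)
    #define HA_PARTITION_ONE_PHASE          (1L << 14)

    int main()
    {
      /* A hypothetical engine that supports the partitioning function and
         both fast-change variants; a simpler engine might set only the
         first bit. */
      unsigned long flags= HA_PARTITION_FUNCTION_SUPPORTED |
                           HA_FAST_CHANGE_PARTITION |
                           HA_PARTITION_ONE_PHASE;

      /* The fast path needs one of the two fast-change bits; supporting
         the partitioning function alone is not enough. */
      int fast_alter_partition=
        ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0);
      printf("fast_alter_partition: %d\n", fast_alter_partition);
      return 0;
    }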
...@@ -4011,7 +4011,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
    is freed by setting version to 0. table->s->version= 0 forces a
    flush of the table object in close_thread_tables().
  */
- uint flags;
+ uint flags= 0;
  table->s->version= 0L;
  if (alter_info->flags == ALTER_TABLE_REORG)
  {
...@@ -4060,7 +4060,10 @@ uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info,
    my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
    DBUG_RETURN(1);
  }
- *fast_alter_partition= flags ^ HA_PARTITION_FUNCTION_SUPPORTED;
+ *fast_alter_partition=
+   ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0);
+ DBUG_PRINT("info", ("*fast_alter_partition: %d flags: 0x%x",
+                     *fast_alter_partition, flags));
  if (alter_info->flags & ALTER_ADD_PARTITION)
  {
    /*
...@@ -4660,7 +4663,6 @@ the generated partition syntax in a correct manner.
    DBUG_ASSERT(FALSE);
  }
  *partition_changed= TRUE;
- create_info->db_type= &partition_hton;
  thd->lex->part_info= tab_part_info;
  if (alter_info->flags == ALTER_ADD_PARTITION ||
      alter_info->flags == ALTER_REORGANIZE_PARTITION)
...
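The replaced assignment shows why the old computation was wrong: XOR-ing the whole flag word with HA_PARTITION_FUNCTION_SUPPORTED is zero only when exactly that one bit is set, so its truth value tracks unrelated bits instead of the fast-change capabilities. A small sketch of the two tests side by side, reusing the flag values defined above (illustrative only):

    #include <stdio.h>

    #define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
    #define HA_FAST_CHANGE_PARTITION        (1L << 13)
    #define HA_PARTITION_ONE_PHASE          (1L << 14)

    int main()
    {
      /* Engine supports the partitioning function but has no fast ALTER path. */
      unsigned long flags= HA_PARTITION_FUNCTION_SUPPORTED;
      printf("old: %lu  new: %d\n",
             flags ^ HA_PARTITION_FUNCTION_SUPPORTED,               /* 0    */
             (int)((flags & (HA_FAST_CHANGE_PARTITION |
                             HA_PARTITION_ONE_PHASE)) != 0));       /* 0    */

      /* With flags left at 0 (e.g. the uninitialised case the other hunk
         fixes), the XOR form becomes non-zero and would wrongly pick the
         fast path, while the mask test stays false. */
      flags= 0;
      printf("old: %lu  new: %d\n",
             flags ^ HA_PARTITION_FUNCTION_SUPPORTED,               /* 4096 */
             (int)((flags & (HA_FAST_CHANGE_PARTITION |
                             HA_PARTITION_ONE_PHASE)) != 0));       /* 0    */
      return 0;
    }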
...@@ -24,35 +24,6 @@
#define HA_CAN_PARTITION_UNIQUE (1 << 2)
#define HA_USE_AUTO_PARTITION (1 << 3)
/*
HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is
supported at all.
HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
exists but they are not necessarily done online.
HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
the new partition and to the old partitions when updating through the
old partitioning schema while performing a change of the partitioning.
This means that we can support updating of the table while performing
the copy phase of the change. For no lock at all also a double write
from new to old must exist and this is not required when this flag is
set.
This is actually removed even before it was introduced the first time.
The new idea is that handlers will handle the lock level already in
store_lock for ALTER TABLE partitions.
HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
care of changing the partitions online and in one phase. Thus all phases
needed to handle the change are implemented inside the storage engine.
The storage engine must also support auto-discovery since the frm file
is changed as part of the change and this change must be controlled by
the storage engine. A typical engine to support this is NDB (through
WL #2498).
*/
#define HA_PARTITION_FUNCTION_SUPPORTED (1L << 12)
#define HA_FAST_CHANGE_PARTITION (1L << 13)
#define HA_PARTITION_ONE_PHASE (1L << 14)
/*typedef struct {
ulonglong data_file_length;
ulonglong max_data_file_length;
...
...@@ -5200,6 +5200,8 @@ copy_data_between_tables(TABLE *from,TABLE *to,
  */
  to->file->ha_set_all_bits_in_write_set();
  from->file->ha_retrieve_all_cols();
+ if (from->part_info)
+   bitmap_set_all(&(from->part_info->used_partitions));
  init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1);
  if (ignore ||
      handle_duplicates == DUP_REPLACE)
...
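The two added lines make the copy phase of ALTER TABLE read from every partition: if partition pruning has left from->part_info->used_partitions covering only a subset, the scan could silently skip rows. A rough standalone sketch of the MY_BITMAP calls involved, assuming the mysys bitmap API from my_bitmap.h; the bit count and buffer size are illustrative:

    #include <my_global.h>
    #include <my_sys.h>
    #include <my_bitmap.h>

    int main()
    {
      MY_BITMAP used_partitions;
      my_bitmap_map buf[2];                          /* backing storage, illustrative size */
      bitmap_init(&used_partitions, buf, 4, FALSE);  /* pretend the table has 4 partitions */

      /* A previous pruned statement may have narrowed the set to one partition... */
      bitmap_clear_all(&used_partitions);
      bitmap_set_bit(&used_partitions, 1);

      /* ...so before copying every row, mark all partitions as used, which is
         what copy_data_between_tables() now does via bitmap_set_all(). */
      bitmap_set_all(&used_partitions);
      return bitmap_is_set_all(&used_partitions) ? 0 : 1;
    }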