Commit 537ba1e1 authored by unknown

Fix after running on a 4-node system.


mysql-test/r/ndb_autodiscover.result:
  Removed one test that needs manual interaction
  Added order by in some places
mysql-test/t/ndb_autodiscover.test:
  Removed one test that needs manual interaction
  Added order by in some places
sql/ha_ndbcluster.cc:
  Use HA_WRONG_ASCII_ORDER to activate filesort for indexes that do not return rows in sorted order.
parent 5a995429
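
How the handler change works: the optimizer consults per-index flags to decide whether an ORDER BY can be satisfied by reading the index in key order; once an index type is marked with HA_WRONG_ASCII_ORDER (as the commit message describes), its rows are treated as unsorted and a filesort is added instead. The following is a minimal sketch of that decision only, using simplified stand-in flag values, enum and helper names rather than the real server internals:

// Minimal sketch of the filesort decision. The flag bit values, IndexType
// and order_by_needs_filesort() are hypothetical stand-ins; only the idea
// mirrors the patch.
#include <cstdio>

typedef unsigned long ulong;

// Hypothetical bit values; the real constants come from the server headers.
static const ulong HA_NOT_READ_PREFIX_LAST= 1UL << 0;
static const ulong HA_ONLY_WHOLE_INDEX=     1UL << 1;
static const ulong HA_WRONG_ASCII_ORDER=    1UL << 2;  // index order != sort order
static const ulong HA_READ_NEXT=            1UL << 3;
static const ulong HA_READ_PREV=            1UL << 4;

enum IndexType { PRIMARY_KEY_INDEX= 0, ORDERED_INDEX= 1 };

// Per-index-type flags, shaped like index_type_flags[] in ha_ndbcluster.cc.
static const ulong example_flags[]= {
  /* PRIMARY_KEY_INDEX: hash based, rows come back in no particular order */
  HA_NOT_READ_PREFIX_LAST | HA_ONLY_WHOLE_INDEX | HA_WRONG_ASCII_ORDER,
  /* ORDERED_INDEX: scannable, but this patch still marks it as unsorted */
  HA_READ_NEXT | HA_READ_PREV | HA_NOT_READ_PREFIX_LAST | HA_WRONG_ASCII_ORDER
};

// The optimizer-side question: can ORDER BY ride on the index scan,
// or must the rows go through filesort?
static bool order_by_needs_filesort(IndexType type)
{
  return (example_flags[type] & HA_WRONG_ASCII_ORDER) != 0;
}

int main()
{
  printf("PRIMARY_KEY_INDEX needs filesort: %d\n",
         (int) order_by_needs_filesort(PRIMARY_KEY_INDEX));
  printf("ORDERED_INDEX needs filesort:     %d\n",
         (int) order_by_needs_filesort(ORDERED_INDEX));
  return 0;
}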
--- a/mysql-test/r/ndb_autodiscover.result
+++ b/mysql-test/r/ndb_autodiscover.result
@@ -22,11 +22,11 @@ show status like 'handler_discover%';
 Variable_name Value
 Handler_discover 2
 flush tables;
-select * from t1;
+select * from t1 order by id;
 id name
+1 Autodiscover
 2 Auto 2
 3 Discover 3
-1 Autodiscover
 show status like 'handler_discover%';
 Variable_name Value
 Handler_discover 3
@@ -119,29 +119,6 @@ Variable_name Value
 Handler_discover 2
 drop table t3;
 flush status;
-create table t4(
-id int not null primary key,
-name char(27)
-) engine=ndb;
-insert into t4 values (1, "Automatic");
-select * from t4;
-id name
-1 Automatic
-select * from t4;
-ERROR HY000: Got error 284 'Table not defined in transaction coordinator' from ndbcluster
-flush table t4;
-select * from t4;
-ERROR HY000: Can't open file: 't4' (errno: 709)
-show status like 'handler_discover%';
-Variable_name Value
-Handler_discover 0
-drop table t4;
-flush tables;
-show tables;
-Tables_in_test
-select * from t4;
-ERROR 42S02: Table 'test.t4' doesn't exist
-flush status;
 show status like 'handler_discover%';
 Variable_name Value
 Handler_discover 0
@@ -157,10 +134,6 @@ ALTER TABLE t5 ADD COLUMN adress char(255) FIRST;
 select * from t5;
 adress id name
 NULL 1 Magnus
-flush table t5;
-select * from t5;
-adress id name
-NULL 1 Magnus
 insert into t5 values
 ("Adress for record 2", 2, "Carl-Gustav"),
 ("Adress for record 3", 3, "Karl-Emil");
@@ -190,10 +163,6 @@ ALTER TABLE t6 ADD COLUMN adress char(255) FIRST;
 select * from t6;
 adress id name
 NULL 1 Magnus
-flush table t6;
-select * from t6;
-adress id name
-NULL 1 Magnus
 insert into t6 values
 ("Adress for record 2", 2, "Carl-Gustav"),
 ("Adress for record 3", 3, "Karl-Emil");
--- a/mysql-test/t/ndb_autodiscover.test
+++ b/mysql-test/t/ndb_autodiscover.test
@@ -39,7 +39,7 @@ insert into t1 values (3, "Discover 3");
 show status like 'handler_discover%';
 flush tables;
 system rm var/master-data/test/t1.frm ;
-select * from t1;
+select * from t1 order by id;
 show status like 'handler_discover%';
 #
@@ -150,32 +150,33 @@ drop table t3;
 # but not in NDB can be deleted from disk.
 #
-flush status;
-create table t4(
-id int not null primary key,
-name char(27)
-) engine=ndb;
-insert into t4 values (1, "Automatic");
-select * from t4;
+# Manual test
+#flush status;
+#
+#create table t4(
+# id int not null primary key,
+# name char(27)
+#) engine=ndb;
+#insert into t4 values (1, "Automatic");
+#select * from t4;
+#
 # Remove the table from NDB
 #system drop_tab -c "$NDB_CONNECTSTRING2" -d test t4 > /dev/null ;
-system drop_tab -c "host=localhost:2200;nodeid=5" -d test t4 > /dev/null ;
---error 1296
-select * from t4;
-flush table t4;
---error 1016
-select * from t4;
-show status like 'handler_discover%';
-drop table t4;
-flush tables;
-show tables;
---error 1146
-select * from t4;
+#system drop_tab -c "host=localhost:2200;nodeid=5" -d test t4 > /dev/null ;
+#
+#--error 1296
+#select * from t4;
+#
+#flush table t4;
+#--error 1016
+#select * from t4;
+#
+#show status like 'handler_discover%';
+#drop table t4;
+#flush tables;
+#show tables;
+#--error 1146
+#select * from t4;
 #########################################################
@@ -195,30 +196,10 @@ create table t5(
 insert into t5 values (1, "Magnus");
 select * from t5;
-# Ugly trick to change version of the table in NDB
-# Requires nodeid=5 to be defined and not used
-# Until ALTER TABLE works
-#system copy_tab -c "$NDB_CONNECTSTRING2" -d test t1 t1_copy > /dev/null ;
-#system drop_tab -c "$NDB_CONNECTSTRING2" -d test t1 > /dev/null ;
-#system copy_tab -c "$NDB_CONNECTSTRING2" -d test t1_copy t1 > /dev/null ;
-#system drop_tab -c "$NDB_CONNECTSTRING2" -d test t1_copy > /dev/null ;
 ALTER TABLE t5 ADD COLUMN adress char(255) FIRST;
-# The follwing select will exit with
-# 1030 Got error 241 from storage engine
-# This means it has detected that the schema version of the meta data
-# cached locally in NdbApi is not the same as in the Dictionary of NDB.
-# The user has to resolve this problem by performing a FLUSH TABLE tabname
-#MASV--error 1030
 select * from t5;
-# The application/user is required to call FLUSH TABLE when error 241 is
-# returned. This is a workaround and will in the future be done
-# automatically by the server
-flush table t5;
-select * from t5;
 insert into t5 values
 ("Adress for record 2", 2, "Carl-Gustav"),
 ("Adress for record 3", 3, "Karl-Emil");
@@ -246,29 +227,8 @@ create table t6(
 insert into t6 values (1, "Magnus");
 select * from t6;
-# Ugly trick to change version of the table in NDB
-# Requires nodeid=5 to be defined and not used
-# Until ALTER TABLE works
-#system copy_tab -c "$NDB_CONNECTSTRING2" -d test t6 t6_copy > /dev/null ;
-#system drop_tab -c "$NDB_CONNECTSTRING2" -d test t6 > /dev/null ;
-#system copy_tab -c "$NDB_CONNECTSTRING2" -d test t6_copy t6 > /dev/null ;
-#system drop_tab -c "$NDB_CONNECTSTRING2" -d test t6_copy > /dev/null ;
 ALTER TABLE t6 ADD COLUMN adress char(255) FIRST;
-# The follwing select will exit with
-# 1030 Got error 241 from storage engine
-# This means it has detected that the schema version of the meta data
-# cached locally in NdbApi is not the same as in the Dictionary of NDB.
-# The user has to resolve this problem by performing a FLUSH TABLE tabname
-#MASV--error 1030
-select * from t6;
-# The application/user is required to call FLUSH TABLE when error 241 is
-# returned. This is a workaround and will in the future be done
-# automatically by the server
-flush table t6;
 select * from t6;
 insert into t6 values
 ("Adress for record 2", 2, "Carl-Gustav"),
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -475,7 +475,8 @@ static const ulong index_type_flags[]=
   /* PRIMARY_KEY_INDEX */
   HA_NOT_READ_PREFIX_LAST |
-  HA_ONLY_WHOLE_INDEX,
+  HA_ONLY_WHOLE_INDEX |
+  HA_WRONG_ASCII_ORDER,

   /* PRIMARY_KEY_ORDERED_INDEX */
   /*
@@ -483,19 +484,23 @@ static const ulong index_type_flags[]=
     thus ORDERD BY clauses can be optimized by reading directly
     through the index.
   */
-  HA_NOT_READ_PREFIX_LAST,
+  HA_NOT_READ_PREFIX_LAST |
+  HA_WRONG_ASCII_ORDER,

   /* UNIQUE_INDEX */
   HA_NOT_READ_PREFIX_LAST |
-  HA_ONLY_WHOLE_INDEX,
+  HA_ONLY_WHOLE_INDEX |
+  HA_WRONG_ASCII_ORDER,

   /* UNIQUE_ORDERED_INDEX */
-  HA_NOT_READ_PREFIX_LAST,
+  HA_NOT_READ_PREFIX_LAST |
+  HA_WRONG_ASCII_ORDER,

   /* ORDERED_INDEX */
   HA_READ_NEXT |
   HA_READ_PREV |
-  HA_NOT_READ_AFTER_KEY
+  HA_NOT_READ_PREFIX_LAST |
+  HA_WRONG_ASCII_ORDER
 };

 static const int index_flags_size= sizeof(index_type_flags)/sizeof(ulong);
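
For context, the flag table above is presumably consumed through the handler's per-index flag lookup: each index of a table is classified as one of the five NDB index types, and the matching entry of index_type_flags[] is what the optimizer sees. Below is a hedged sketch of such a lookup under assumed names; the enum values, index_flags_for() and the bounds check are illustrations, not code from this commit.

// Illustrative only: how a handler could map an index type to an entry of the
// patched flag table. Names and flag values are assumptions for this sketch.
#include <cassert>

typedef unsigned long ulong;

// Stand-in flag bits; the real values live in the server headers.
static const ulong HA_NOT_READ_PREFIX_LAST= 1, HA_ONLY_WHOLE_INDEX= 2,
                   HA_WRONG_ASCII_ORDER= 4, HA_READ_NEXT= 8, HA_READ_PREV= 16;

enum NDB_INDEX_TYPE {
  PRIMARY_KEY_INDEX= 0,
  PRIMARY_KEY_ORDERED_INDEX= 1,
  UNIQUE_INDEX= 2,
  UNIQUE_ORDERED_INDEX= 3,
  ORDERED_INDEX= 4
};

// Same shape as the patched table: after this commit every entry carries
// HA_WRONG_ASCII_ORDER, so no index type claims to return rows in sorted order.
static const ulong index_type_flags[]= {
  HA_NOT_READ_PREFIX_LAST | HA_ONLY_WHOLE_INDEX | HA_WRONG_ASCII_ORDER, // PRIMARY_KEY_INDEX
  HA_NOT_READ_PREFIX_LAST | HA_WRONG_ASCII_ORDER,                       // PRIMARY_KEY_ORDERED_INDEX
  HA_NOT_READ_PREFIX_LAST | HA_ONLY_WHOLE_INDEX | HA_WRONG_ASCII_ORDER, // UNIQUE_INDEX
  HA_NOT_READ_PREFIX_LAST | HA_WRONG_ASCII_ORDER,                       // UNIQUE_ORDERED_INDEX
  HA_READ_NEXT | HA_READ_PREV | HA_NOT_READ_PREFIX_LAST | HA_WRONG_ASCII_ORDER // ORDERED_INDEX
};
static const int index_flags_size= sizeof(index_type_flags)/sizeof(ulong);

// Hypothetical lookup: return the flags for an index of the given type,
// guarding against the enum and the table drifting out of sync.
static ulong index_flags_for(NDB_INDEX_TYPE type)
{
  assert((int) type < index_flags_size);
  return index_type_flags[type];
}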