Commit 60e1b917 authored by unknown

Merge

sql/ha_ndbcluster.h:
  Auto merged
sql/ha_ndbcluster.cc:
  SCCS merged
parents 5dd8dbdc 9856980b
......@@ -199,6 +199,7 @@ tim@siva.hindu.god
tim@threads.polyesthetic.msg
tim@white.box
tim@work.mysql.com
timour@mysql.com
tom@basil-firewall.home.com
tomas@mc05.(none)
tomas@poseidon.(none)
......
......@@ -27,6 +27,7 @@ bin_PROGRAMS = mysql mysqladmin mysqlcheck mysqlshow \
mysqldump mysqlimport mysqltest mysqlbinlog mysqlmanagerc mysqlmanager-pwgen
noinst_HEADERS = sql_string.h completion_hash.h my_readline.h \
client_priv.h
mysqladmin_SOURCES = mysqladmin.cc
mysql_SOURCES = mysql.cc readline.cc sql_string.cc completion_hash.cc
mysql_LDADD = @readline_link@ @TERMCAP_LIB@ $(LDADD) $(CXXLDFLAGS)
mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS)
......
......@@ -1287,9 +1287,6 @@ static my_bool wait_pidfile(char *pidfile, time_t last_modified,
}
DBUG_RETURN(error);
}
#ifdef HAVE_NDBCLUSTER_DB
/* lib linked in contains c++ code */
#ifdef __GNUC__
FIX_GCC_LINKING_PROBLEM
#endif
#endif
......@@ -399,7 +399,6 @@ then
then
if $CXX -v 2>&1 | grep 'version 3' > /dev/null 2>&1
then
CFLAGS="$CFLAGS -DDEFINE_CXA_PURE_VIRTUAL"
CXXFLAGS="$CXXFLAGS -DUSE_MYSYS_NEW -DDEFINE_CXA_PURE_VIRTUAL"
fi
fi
......
......@@ -106,7 +106,7 @@ struct fil_node_struct {
device or a raw disk partition */
ulint size; /* size of the file in database pages, 0 if
not known yet; the possible last incomplete
megabyte is ignored if space == 0 */
megabyte may be ignored if space == 0 */
ulint n_pending;
/* count of pending i/o's on this file;
closing of the file is not allowed if
......@@ -160,7 +160,9 @@ struct fil_space_struct {
UT_LIST_BASE_NODE_T(fil_node_t) chain;
/* base node for the file chain */
ulint size; /* space size in pages; 0 if a single-table
tablespace whose size we do not know yet */
tablespace whose size we do not know yet;
last incomplete megabytes in data files may be
ignored if space == 0 */
ulint n_reserved_extents;
/* number of reserved free extents for
ongoing operations like B-tree page split */
......@@ -3255,7 +3257,7 @@ fil_extend_space_to_desired_size(
ulint* actual_size, /* out: size of the space after extension;
if we ran out of disk space this may be lower
than the desired size */
ulint space_id, /* in: space id, must be != 0 */
ulint space_id, /* in: space id */
ulint size_after_extend)/* in: desired size in pages after the
extension; if the current space size is bigger
than this already, the function does nothing */
......@@ -3352,6 +3354,17 @@ fil_extend_space_to_desired_size(
fil_node_complete_io(node, system, OS_FILE_WRITE);
*actual_size = space->size;
if (space_id == 0) {
ulint pages_per_mb = (1024 * 1024) / UNIV_PAGE_SIZE;
/* Keep the last data file size info up to date, rounded to
full megabytes */
srv_data_file_sizes[srv_n_data_files - 1] =
(node->size / pages_per_mb) * pages_per_mb;
}
/*
printf("Extended %s to %lu, actual size %lu pages\n", space->name,
size_after_extend, *actual_size); */
......
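The hunk above keeps srv_data_file_sizes in whole megabytes for the last system-tablespace file. A minimal sketch of the rounding arithmetic, assuming the default 16 KB InnoDB page (UNIV_PAGE_SIZE = 16384):

  ulint pages_per_mb = (1024 * 1024) / 16384;  /* 64 pages per megabyte */
  ulint node_size = 6500;                      /* current file size in pages */
  /* integer division truncates, so the trailing partial megabyte is dropped:
     6500 / 64 = 101 -> 101 * 64 = 6464 pages recorded, 36 pages ignored */
  ulint recorded = (node_size / pages_per_mb) * pages_per_mb;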
......@@ -478,7 +478,7 @@ fil_extend_space_to_desired_size(
ulint* actual_size, /* out: size of the space after extension;
if we ran out of disk space this may be lower
than the desired size */
ulint space_id, /* in: space id, must be != 0 */
ulint space_id, /* in: space id */
ulint size_after_extend);/* in: desired size in pages after the
extension; if the current space size is bigger
than this already, the function does nothing */
......
......@@ -1509,7 +1509,6 @@ row_ins_scan_sec_index_for_duplicate(
ibool moved;
mtr_t mtr;
trx_t* trx;
const char* ptr;
n_unique = dict_index_get_n_unique(index);
......@@ -1630,7 +1629,6 @@ row_ins_duplicate_error_in_clust(
page_t* page;
ulint n_unique;
trx_t* trx = thr_get_trx(thr);
const char* ptr;
UT_NOT_USED(mtr);
......
......@@ -164,9 +164,9 @@ static void _ftb_parse_query(FTB *ftb, byte **start, byte *end,
if (param.trunc) ftbw->flags|=FTB_FLAG_TRUNC;
ftbw->weight=weight;
ftbw->up=up;
ftbw->docid[0]=ftbw->docid[1]=HA_POS_ERROR;
ftbw->docid[0]=ftbw->docid[1]=HA_OFFSET_ERROR;
ftbw->ndepth= (param.yesno<0) + depth;
ftbw->key_root=HA_POS_ERROR;
ftbw->key_root=HA_OFFSET_ERROR;
memcpy(ftbw->word+1, w.pos, w.len);
ftbw->word[0]=w.len;
if (param.yesno > 0) up->ythresh++;
......@@ -181,7 +181,7 @@ static void _ftb_parse_query(FTB *ftb, byte **start, byte *end,
ftbe->weight=weight;
ftbe->up=up;
ftbe->ythresh=ftbe->yweaks=0;
ftbe->docid[0]=ftbe->docid[1]=HA_POS_ERROR;
ftbe->docid[0]=ftbe->docid[1]=HA_OFFSET_ERROR;
if ((ftbe->quot=param.quot)) ftb->with_scan|=2;
if (param.yesno > 0) up->ythresh++;
_ftb_parse_query(ftb, start, end, ftbe, depth+1);
......@@ -259,7 +259,7 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
{
if (!ftbw->off || !(ftbw->flags & FTB_FLAG_TRUNC))
{
ftbw->docid[0]=HA_POS_ERROR;
ftbw->docid[0]=HA_OFFSET_ERROR;
if ((ftbw->flags & FTB_FLAG_YES) && ftbw->up->up==0)
{
/*
......@@ -346,7 +346,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
ftbe->up->ythresh - ftbe->up->yweaks >1) /* 1 */
{
FTB_EXPR *top_ftbe=ftbe->up->up;
ftbw->docid[0]=HA_POS_ERROR;
ftbw->docid[0]=HA_OFFSET_ERROR;
for (ftbe=ftbw->up; ftbe != top_ftbe; ftbe=ftbe->up)
if (ftbe->flags & FTB_FLAG_YES)
ftbe->yweaks++;
......@@ -387,7 +387,7 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
ftb->charset= ((keynr==NO_SUCH_KEY) ?
default_charset_info : info->s->keyinfo[keynr].seg->charset);
ftb->with_scan=0;
ftb->lastpos=HA_POS_ERROR;
ftb->lastpos=HA_OFFSET_ERROR;
bzero(& ftb->no_dupes, sizeof(TREE));
init_alloc_root(&ftb->mem_root, 1024, 1024);
......@@ -410,7 +410,7 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query,
ftbe->quot=0;
ftbe->up=0;
ftbe->ythresh=ftbe->yweaks=0;
ftbe->docid[0]=ftbe->docid[1]=HA_POS_ERROR;
ftbe->docid[0]=ftbe->docid[1]=HA_OFFSET_ERROR;
ftb->root=ftbe;
_ftb_parse_query(ftb, &query, query+query_len, ftbe, 0);
ftb->list=(FTB_WORD **)alloc_root(&ftb->mem_root,
......@@ -561,7 +561,7 @@ int ft_boolean_read_next(FT_INFO *ftb, char *record)
while (ftb->state == INDEX_SEARCH &&
(curdoc=((FTB_WORD *)queue_top(& ftb->queue))->docid[0]) !=
HA_POS_ERROR)
HA_OFFSET_ERROR)
{
while (curdoc == (ftbw=(FTB_WORD *)queue_top(& ftb->queue))->docid[0])
{
......@@ -615,7 +615,7 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
const byte *end;
my_off_t docid=ftb->info->lastpos;
if (docid == HA_POS_ERROR)
if (docid == HA_OFFSET_ERROR)
return -2.0;
if (!ftb->queue.elements)
return 0;
......@@ -627,9 +627,9 @@ float ft_boolean_find_relevance(FT_INFO *ftb, byte *record, uint length)
for (i=0; i < ftb->queue.elements; i++)
{
ftb->list[i]->docid[1]=HA_POS_ERROR;
ftb->list[i]->docid[1]=HA_OFFSET_ERROR;
for (x=ftb->list[i]->up; x; x=x->up)
x->docid[1]=HA_POS_ERROR;
x->docid[1]=HA_OFFSET_ERROR;
}
}
......
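Throughout this file the sentinel changes from HA_POS_ERROR to HA_OFFSET_ERROR because docid, key_root and lastpos hold my_off_t file offsets, not ha_rows row counts; each sentinel is the all-ones value of its own type, and the two differ whenever those types differ in width. A sketch with assumed definitions (the real ones live in include/my_base.h):

  typedef ulonglong my_off_t;              /* file offset type, assumption */
  #define HA_OFFSET_ERROR (~(my_off_t) 0)  /* "no offset" sentinel */
  /* HA_POS_ERROR is the analogous ~(ha_rows) 0 and is only correct for
     row counts, which is what the old code misused it for. */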
......@@ -487,3 +487,18 @@ prepare stmt1 from @str2;
execute stmt1 using @ivar;
?
1234
SET TIMESTAMP=10000;
create table t2 (c char(30)) charset=ucs2;
set @v=convert('abc' using ucs2);
reset master;
insert into t2 values (@v);
show binlog events from 79;
Log_name Pos Event_type Server_id Orig_log_pos Info
master-bin.000001 79 User var 1 79 @`v`=_ucs2 0x006100620063 COLLATE ucs2_general_ci
master-bin.000001 119 Query 1 119 use `test`; insert into t2 values (@v)
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
SET @`v`:=_ucs2 0x006100620063 COLLATE ucs2_general_ci;
use test;
SET TIMESTAMP=10000;
insert into t2 values (@v);
drop table t2;
......@@ -382,3 +382,12 @@ s
pra para para
para para para
DROP TABLE t1;
CREATE TABLE t1 (h text, FULLTEXT (h));
INSERT INTO t1 VALUES ('Jesses Hasse Ling and his syncopators of Swing');
REPAIR TABLE t1;
Table Op Msg_type Msg_text
test.t1 repair status OK
select count(*) from t1;
count(*)
1
drop table t1;
......@@ -78,9 +78,9 @@ unique key(a)
) engine=ndb;
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '2' for key 1
insert into t1 values(3, 'AAA');
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '3' for key 1
select * from t1 order by p;
p a
1 aAa
......
......@@ -22,7 +22,7 @@ select * from t1 where b = 4 order by a;
a b c
3 4 6
insert into t1 values(8, 2, 3);
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '8' for key 1
select * from t1 order by a;
a b c
1 2 3
......@@ -65,7 +65,7 @@ select * from t2 where b = 4 order by a;
a b c
3 4 6
insert into t2 values(8, 2, 3);
ERROR 23000: Can't write, because of unique constraint, to table 't2'
ERROR 23000: Duplicate entry '8' for key 1
select * from t2 order by a;
a b c
1 2 3
......@@ -123,7 +123,7 @@ pk a
3 NULL
4 4
insert into t1 values (5,0);
ERROR 23000: Can't write, because of unique constraint, to table 't1'
ERROR 23000: Duplicate entry '5' for key 1
select * from t1 order by pk;
pk a
-1 NULL
......@@ -156,7 +156,7 @@ pk a b c
0 NULL 18 NULL
1 3 19 abc
insert into t2 values(2,3,19,'abc');
ERROR 23000: Can't write, because of unique constraint, to table 't2'
ERROR 23000: Duplicate entry '2' for key 1
select * from t2 order by pk;
pk a b c
-1 1 17 NULL
......
......@@ -535,27 +535,46 @@ count(*)
2000
insert into t1 select * from t1 where b < 10 order by pk1;
ERROR 23000: Duplicate entry '9' for key 1
DELETE FROM t1 WHERE pk1=2;
begin;
INSERT IGNORE INTO t1 VALUES(1,2,3);
ERROR HY000: Table storage engine for 't1' doesn't have this option
commit;
select * from t1 where pk1=1;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
pk1 b c
0 0 0
1 1 1
INSERT IGNORE INTO t1 VALUES(1,2,3);
ERROR HY000: Table storage engine for 't1' doesn't have this option
select * from t1 where pk1=1;
2 3 4
rollback;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
pk1 b c
0 0 0
1 1 1
REPLACE INTO t1 values(1, 2, 3);
2 3 4
REPLACE INTO t1 values(1, 78, 3);
select * from t1 where pk1=1;
pk1 b c
1 2 3
INSERT INTO t1 VALUES(1,1,1) ON DUPLICATE KEY UPDATE b=79;
ERROR HY000: Table storage engine for 't1' doesn't have this option
select * from t1 where pk1=1;
1 78 3
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=79;
select * from t1 where pk1 < 4 order by pk1;
pk1 b c
0 0 0
1 79 3
2 3 4
3 79 3
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=pk1+c;
select * from t1 where pk1 < 4 order by pk1;
pk1 b c
0 0 0
1 4 3
2 3 4
3 6 3
DELETE FROM t1 WHERE pk1 = 2 OR pk1 = 4 OR pk1 = 6;
INSERT INTO t1 VALUES(1,1,1),(2,2,17),(3,4,5) ON DUPLICATE KEY UPDATE pk1=b;
select * from t1 where pk1 = b and b != c order by pk1;
pk1 b c
1 2 3
2 2 17
4 4 3
6 6 3
DROP TABLE t1;
CREATE TABLE t1(a INT) ENGINE=ndb;
INSERT IGNORE INTO t1 VALUES (1);
......
......@@ -1110,4 +1110,14 @@ t1 CREATE TABLE `t1` (
`a` char(1) character set latin1 collate latin1_german1_ci default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 as
(select a from t2) union
(select b from t2) union
(select 'c' collate latin1_german1_ci from t2);
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` char(1) character set latin1 collate latin1_german1_ci default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
drop table t2;
......@@ -169,18 +169,12 @@ SET @`a b`='hello';
INSERT INTO t1 VALUES(@`a b`);
set @var1= "';aaa";
insert into t1 values (@var1);
create table t2 (c char(30)) charset=ucs2;
set @v=convert('abc' using ucs2);
insert into t2 values (@v);
show binlog events from 79;
Log_name Pos Event_type Server_id Orig_log_pos Info
master-bin.000001 79 User var 1 79 @`a b`=_latin1 0x68656C6C6F COLLATE latin1_swedish_ci
master-bin.000001 120 Query 1 120 use `test`; INSERT INTO t1 VALUES(@`a b`)
master-bin.000001 184 User var 1 184 @`var1`=_latin1 0x273B616161 COLLATE latin1_swedish_ci
master-bin.000001 226 Query 1 226 use `test`; insert into t1 values (@var1)
master-bin.000001 290 Query 1 290 use `test`; create table t2 (c char(30)) charset=ucs2
master-bin.000001 366 User var 1 366 @`v`=_ucs2 0x006100620063 COLLATE ucs2_general_ci
master-bin.000001 406 Query 1 406 use `test`; insert into t2 values (@v)
/*!40019 SET @@session.max_insert_delayed_threads=0*/;
SET @`a b`:=_latin1 0x68656C6C6F COLLATE latin1_swedish_ci;
use test;
......@@ -189,12 +183,7 @@ INSERT INTO t1 VALUES(@`a b`);
SET @`var1`:=_latin1 0x273B616161 COLLATE latin1_swedish_ci;
SET TIMESTAMP=10000;
insert into t1 values (@var1);
SET TIMESTAMP=10000;
create table t2 (c char(30)) charset=ucs2;
SET @`v`:=_ucs2 0x006100620063 COLLATE ucs2_general_ci;
SET TIMESTAMP=10000;
insert into t2 values (@v);
drop table t1, t2;
drop table t1;
set @var= NULL ;
select FIELD( @var,'1it','Hit') as my_column;
my_column
......
......@@ -323,3 +323,19 @@ set @str1 = 'select ?';
set @str2 = convert(@str1 using ucs2);
prepare stmt1 from @str2;
execute stmt1 using @ivar;
#
# Check correct binlogging of UCS2 user variables (BUG#3875)
#
SET TIMESTAMP=10000;
create table t2 (c char(30)) charset=ucs2;
set @v=convert('abc' using ucs2);
reset master;
insert into t2 values (@v);
show binlog events from 79;
# more important than SHOW BINLOG EVENTS, mysqlbinlog (where we
# absolutely need variables names to be quoted and strings to be
# escaped).
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000001
drop table t2;
......@@ -295,3 +295,14 @@ insert into t1 (s) values ('p
select * from t1 where match(s) against('para' in boolean mode);
select * from t1 where match(s) against('par*' in boolean mode);
DROP TABLE t1;
#
# icc -ip bug (ip = interprocedural optimization)
# bug#5528
#
CREATE TABLE t1 (h text, FULLTEXT (h));
INSERT INTO t1 VALUES ('Jesses Hasse Ling and his syncopators of Swing');
REPAIR TABLE t1;
select count(*) from t1;
drop table t1;
......@@ -86,9 +86,9 @@ create table t1 (
# ok
insert into t1 values(1, 'aAa');
# fail
--error 1169
--error 1062
insert into t1 values(2, 'aaa');
--error 1169
--error 1062
insert into t1 values(3, 'AAA');
# 1
select * from t1 order by p;
......
......@@ -21,7 +21,7 @@ select * from t1 where b = 4 order by b;
insert into t1 values(7,8,3);
select * from t1 where b = 4 order by a;
-- error 1169
-- error 1062
insert into t1 values(8, 2, 3);
select * from t1 order by a;
delete from t1 where a = 1;
......@@ -49,7 +49,7 @@ select * from t2 where c = 6;
insert into t2 values(7,8,3);
select * from t2 where b = 4 order by a;
-- error 1169
-- error 1062
insert into t2 values(8, 2, 3);
select * from t2 order by a;
delete from t2 where a = 1;
......@@ -92,7 +92,7 @@ insert into t1 values (-1,NULL), (0,0), (1,NULL),(2,2),(3,NULL),(4,4);
select * from t1 order by pk;
--error 1169
--error 1062
insert into t1 values (5,0);
select * from t1 order by pk;
delete from t1 where a = 0;
......@@ -111,7 +111,7 @@ insert into t2 values (-1,1,17,NULL),(0,NULL,18,NULL),(1,3,19,'abc');
select * from t2 order by pk;
--error 1169
--error 1062
insert into t2 values(2,3,19,'abc');
select * from t2 order by pk;
delete from t2 where c IS NOT NULL;
......
......@@ -564,23 +564,37 @@ select count(*) from t1;
--error 1062
insert into t1 select * from t1 where b < 10 order by pk1;
DELETE FROM t1 WHERE pk1=2;
begin;
--error 1031
INSERT IGNORE INTO t1 VALUES(1,2,3);
commit;
select * from t1 where pk1=1;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
rollback;
--error 1031
INSERT IGNORE INTO t1 VALUES(1,2,3);
select * from t1 where pk1=1;
INSERT IGNORE INTO t1 VALUES(1,2,3),(2,3,4);
select * from t1 where pk1 < 3 order by pk1;
REPLACE INTO t1 values(1, 2, 3);
REPLACE INTO t1 values(1, 78, 3);
select * from t1 where pk1=1;
--error 1031
INSERT INTO t1 VALUES(1,1,1) ON DUPLICATE KEY UPDATE b=79;
select * from t1 where pk1=1;
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=79;
select * from t1 where pk1 < 4 order by pk1;
INSERT INTO t1 VALUES(1,1,1),(3,4,5) ON DUPLICATE KEY UPDATE b=pk1+c;
select * from t1 where pk1 < 4 order by pk1;
DELETE FROM t1 WHERE pk1 = 2 OR pk1 = 4 OR pk1 = 6;
INSERT INTO t1 VALUES(1,1,1),(2,2,17),(3,4,5) ON DUPLICATE KEY UPDATE pk1=b;
select * from t1 where pk1 = b and b != c order by pk1;
# The following test case currently does not work
#DELETE FROM t1;
#CREATE UNIQUE INDEX bi ON t1(b);
#INSERT INTO t1 VALUES
#(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
#(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
#INSERT INTO t1 VALUES(0,1,0),(21,21,21) ON DUPLICATE KEY UPDATE pk1=b+10,c=b+10;
#select * from t1 order by pk1;
DROP TABLE t1;
......
......@@ -652,5 +652,11 @@ create table t1 as
(select b collate latin1_german1_ci from t2);
show create table t1;
drop table t1;
create table t1 as
(select a from t2) union
(select b from t2) union
(select 'c' collate latin1_german1_ci from t2);
show create table t1;
drop table t1;
drop table t2;
......@@ -108,16 +108,13 @@ SET @`a b`='hello';
INSERT INTO t1 VALUES(@`a b`);
set @var1= "';aaa";
insert into t1 values (@var1);
create table t2 (c char(30)) charset=ucs2;
set @v=convert('abc' using ucs2);
insert into t2 values (@v);
show binlog events from 79;
# more important than SHOW BINLOG EVENTS, mysqlbinlog (where we
# absolutely need variables names to be quoted and strings to be
# escaped).
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
--exec $MYSQL_BINLOG --short-form $MYSQL_TEST_DIR/var/log/master-bin.000001
drop table t1, t2;
drop table t1;
#
......
......@@ -309,7 +309,7 @@ void multi_keycache_free(void)
Get a key cache to be used for a specific table.
SYNOPSIS
multi_key_cache_get()
multi_key_cache_search()
key key to find (usually table path)
uint length Length of key.
......
......@@ -28,6 +28,7 @@ ndbapi/NdbIndexScanOperation.hpp \
ndbapi/ndberror.h
mgmapiinclude_HEADERS = \
mgmapi/LocalConfig.hpp \
mgmapi/mgmapi.h \
mgmapi/mgmapi_debug.h
......
......@@ -132,9 +132,10 @@ class TupAddAttrConf {
friend class Dblqh;
friend class Dbtup;
public:
STATIC_CONST( SignalLength = 1 );
STATIC_CONST( SignalLength = 2 );
private:
Uint32 userPtr;
Uint32 lastAttr; // bool: got last attr and closed frag op
};
class TupAddAttrRef {
......@@ -171,9 +172,10 @@ class TuxAddAttrConf {
friend class Dblqh;
friend class Dbtux;
public:
STATIC_CONST( SignalLength = 1 );
STATIC_CONST( SignalLength = 2 );
private:
Uint32 userPtr;
Uint32 lastAttr; // bool: got last attr and closed frag op
};
class TuxAddAttrRef {
......
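Both confirmation signals grow from one word to two: word 0 is still userPtr, word 1 is the new lastAttr flag. A condensed sketch of how the receiver picks it up (see the Dblqh::execTUP_ADD_ATTCONF hunk further down):

  addfragptr.i = signal->theData[0];          // userPtr echoed back
  const bool lastAttr = signal->theData[1];   // true: TUP/TUX got the last
                                              // attribute and closed its frag op
  if (lastAttr)
    addfragptr.p->tup1Connectptr = RNIL;      // nothing left to abort later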
......@@ -582,7 +582,7 @@ NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
cs->cset != 0 &&
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->strxfrm_multiply == 1; // current limitation
cs->strxfrm_multiply <= 1; // current limitation
}
break;
case Type::Varchar:
......@@ -618,7 +618,7 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->coll->strnncollsp != 0 &&
cs->strxfrm_multiply == 1; // current limitation
cs->strxfrm_multiply <= 1; // current limitation
}
break;
case Type::Varchar:
......@@ -633,7 +633,7 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->coll->strnncollsp != 0 &&
cs->strxfrm_multiply == 1; // current limitation
cs->strxfrm_multiply <= 1; // current limitation
}
break;
default:
......
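The old `== 1` test apparently rejected character sets that report strxfrm_multiply as 0, although 0 and 1 both mean strnxfrm does not grow the key, which is the actual limitation being enforced. A hypothetical helper expressing the intent (not a function in the source):

  static bool xfrm_keeps_length(const CHARSET_INFO* cs)
  {
    return cs->cset != 0 && cs->coll != 0 &&
           cs->coll->strnxfrm != 0 &&
           cs->strxfrm_multiply <= 1;   /* growth factor 0 or 1 */
  }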
......@@ -2,7 +2,7 @@ Next QMGR 1
Next NDBCNTR 1000
Next NDBFS 2000
Next DBACC 3001
Next DBTUP 4007
Next DBTUP 4013
Next DBLQH 5042
Next DBDICT 6006
Next DBDIH 7174
......@@ -10,7 +10,7 @@ Next DBTC 8035
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
Next DBTUX 12001
Next DBTUX 12007
Next SUMA 13001
TESTING NODE FAILURE, ARBITRATION
......@@ -393,6 +393,12 @@ Failed Create Table:
--------------------
7173: Create table failed due to not sufficient number of fragment or
replica records.
4007 12001: Fail create 1st fragment
4008 12002: Fail create 2nd fragment
4009 12003: Fail create 1st attribute in 1st fragment
4010 12004: Fail create last attribute in 1st fragment
4011 12005: Fail create 1st attribute in 2nd fragment
4012 12006: Fail create last attribute in 2nd fragment
Drop Table/Index:
-----------------
......
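Each code pair is armed from a test client and consumed on first use. A minimal sketch of the pattern, mirroring the runFailAddFragment test added to testDict later in this commit:

  NdbRestarter restarter;
  int nodeId = restarter.getMasterNodeId();
  restarter.insertErrorInNode(nodeId, 4007);  // fail creating 1st fragment
  assert(pDic->createTable(tab) != 0);        // DDL must fail once
  assert(pDic->createTable(tab) == 0);        // error cleared: must succeed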
......@@ -239,7 +239,11 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::TableName, tablePtr.p->tableName);
w.add(DictTabInfo::TableId, tablePtr.i);
#ifdef HAVE_TABLE_REORG
w.add(DictTabInfo::SecondTableId, tablePtr.p->secondTable);
#else
w.add(DictTabInfo::SecondTableId, (Uint32)0);
#endif
w.add(DictTabInfo::TableVersion, tablePtr.p->tableVersion);
w.add(DictTabInfo::NoOfKeyAttr, tablePtr.p->noOfPrimkey);
w.add(DictTabInfo::NoOfAttributes, tablePtr.p->noOfAttributes);
......@@ -1436,6 +1440,7 @@ Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
jam();
return RNIL;
}//if
#ifdef HAVE_TABLE_REORG
bool secondFound = false;
for (tablePtr.i = firstTablePtr.i + 1; tablePtr.i < tabSize ; tablePtr.i++) {
jam();
......@@ -1455,6 +1460,7 @@ Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
firstTablePtr.p->tabState = TableRecord::NOT_DEFINED;
return RNIL;
}//if
#endif
return firstTablePtr.i;
}//Dbdict::getFreeTableRecord()
......@@ -4623,7 +4629,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
jam();
tablePtr.p->tabState = TableRecord::DEFINING;
}//if
#ifdef HAVE_TABLE_REORG
/* ---------------------------------------------------------------- */
// Get id of second table id and check that table doesn't already exist
// and set up links between first and second table.
......@@ -4637,7 +4643,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
secondTablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
secondTablePtr.p->secondTable = tablePtr.i;
tablePtr.p->secondTable = secondTablePtr.i;
#endif
/* ---------------------------------------------------------------- */
// Set table version
/* ---------------------------------------------------------------- */
......@@ -5535,10 +5541,12 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
nextAttrRecord = attrPtr.p->nextAttrInTable;
c_attributeRecordPool.release(attrPtr);
}//if
#ifdef HAVE_TABLE_REORG
Uint32 secondTableId = tablePtr.p->secondTable;
initialiseTableRecord(tablePtr);
c_tableRecordPool.getPtr(tablePtr, secondTableId);
initialiseTableRecord(tablePtr);
#endif
return;
}//releaseTableObject()
......
......@@ -151,10 +151,10 @@ public:
/* Temporary record used during add/drop table */
Uint32 myConnect;
#ifdef HAVE_TABLE_REORG
/* Second table used by this table (for table reorg) */
Uint32 secondTable;
#endif
/* Next record in Pool */
Uint32 nextPool;
......
......@@ -2474,7 +2474,7 @@ private:
void sendExecFragRefLab(Signal* signal);
void fragrefLab(Signal* signal, BlockReference retRef,
Uint32 retPtr, Uint32 errorCode);
void accFragRefLab(Signal* signal);
void abortAddFragOps(Signal* signal);
void rwConcludedLab(Signal* signal);
void sendsttorryLab(Signal* signal);
void initialiseRecordsLab(Signal* signal, Uint32 data, Uint32, Uint32);
......
......@@ -912,6 +912,10 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
/* *********************************************************> */
/* LQHFRAGREQ: Create new fragments for a table. Sender DICT */
/* *********************************************************> */
// this unbelievable mess could be replaced by one signal to LQH
// and execute direct to local DICT to get everything at once
void Dblqh::execLQHFRAGREQ(Signal* signal)
{
jamEntry();
......@@ -1049,6 +1053,11 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->lh3DistrBits = tlhstar;
addfragptr.p->tableType = tableType;
addfragptr.p->primaryTableId = primaryTableId;
//
addfragptr.p->tup1Connectptr = RNIL;
addfragptr.p->tup2Connectptr = RNIL;
addfragptr.p->tux1Connectptr = RNIL;
addfragptr.p->tux2Connectptr = RNIL;
if (DictTabInfo::isTable(tableType) ||
DictTabInfo::isHashIndex(tableType)) {
......@@ -1329,15 +1338,21 @@ void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
{
jamEntry();
addfragptr.i = signal->theData[0];
// implies that operation was released on the other side
const bool lastAttr = signal->theData[1];
ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
switch (addfragptr.p->addfragStatus) {
case AddFragRecord::TUP_ATTR_WAIT1:
jam();
if (lastAttr)
addfragptr.p->tup1Connectptr = RNIL;
addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT2;
sendAddAttrReq(signal);
break;
case AddFragRecord::TUP_ATTR_WAIT2:
jam();
if (lastAttr)
addfragptr.p->tup2Connectptr = RNIL;
if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT1;
sendAddAttrReq(signal);
......@@ -1347,11 +1362,15 @@ void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
break;
case AddFragRecord::TUX_ATTR_WAIT1:
jam();
if (lastAttr)
addfragptr.p->tux1Connectptr = RNIL;
addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT2;
sendAddAttrReq(signal);
break;
case AddFragRecord::TUX_ATTR_WAIT2:
jam();
if (lastAttr)
addfragptr.p->tux2Connectptr = RNIL;
goto done_with_attr;
break;
done_with_attr:
......@@ -1455,6 +1474,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
jam();
TupAddAttrConf* tupconf = (TupAddAttrConf*)signal->getDataPtrSend();
tupconf->userPtr = addfragptr.i;
tupconf->lastAttr = false;
sendSignal(reference(), GSN_TUP_ADD_ATTCONF,
signal, TupAddAttrConf::SignalLength, JBB);
return;
......@@ -1485,6 +1505,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
jam();
TuxAddAttrConf* tuxconf = (TuxAddAttrConf*)signal->getDataPtrSend();
tuxconf->userPtr = addfragptr.i;
tuxconf->lastAttr = false;
sendSignal(reference(), GSN_TUX_ADD_ATTRCONF,
signal, TuxAddAttrConf::SignalLength, JBB);
return;
......@@ -1549,6 +1570,40 @@ void Dblqh::fragrefLab(Signal* signal,
return;
}//Dblqh::fragrefLab()
/*
* Abort on-going ops.
*/
void Dblqh::abortAddFragOps(Signal* signal)
{
fragptr.i = addfragptr.p->fragmentPtr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
signal->theData[0] = (Uint32)-1;
if (addfragptr.p->tup1Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tup1Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup1Connectptr = RNIL;
}
if (addfragptr.p->tup2Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tup2Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup2Connectptr = RNIL;
}
if (addfragptr.p->tux1Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tux1Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux1Connectptr = RNIL;
}
if (addfragptr.p->tux2Connectptr != RNIL) {
jam();
signal->theData[1] = addfragptr.p->tux2Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux2Connectptr = RNIL;
}
}
/* ************>> */
/* ACCFRAGREF > */
/* ************>> */
......@@ -1582,6 +1637,27 @@ void Dblqh::execTUPFRAGREF(Signal* signal)
fragptr.i = addfragptr.p->fragmentPtr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
addfragptr.p->addfragErrorCode = terrorCode;
// no operation to release, just add some jams
switch (addfragptr.p->addfragStatus) {
case AddFragRecord::WAIT_TWO_TUP:
jam();
break;
case AddFragRecord::WAIT_ONE_TUP:
jam();
break;
case AddFragRecord::WAIT_TWO_TUX:
jam();
break;
case AddFragRecord::WAIT_ONE_TUX:
jam();
break;
default:
ndbrequire(false);
break;
}
abortAddFragOps(signal);
const Uint32 ref = addfragptr.p->dictBlockref;
const Uint32 senderData = addfragptr.p->dictConnectptr;
const Uint32 errorCode = addfragptr.p->addfragErrorCode;
......@@ -1605,11 +1681,38 @@ void Dblqh::execTUXFRAGREF(Signal* signal)
void Dblqh::execTUP_ADD_ATTRREF(Signal* signal)
{
jamEntry();
addfragptr.i = signal->theData[0];
ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
terrorCode = signal->theData[1];
addfragptr.p->addfragErrorCode = terrorCode;
// operation was released on the other side
switch (addfragptr.p->addfragStatus) {
case AddFragRecord::TUP_ATTR_WAIT1:
jam();
ndbrequire(addfragptr.p->tup1Connectptr != RNIL);
addfragptr.p->tup1Connectptr = RNIL;
break;
case AddFragRecord::TUP_ATTR_WAIT2:
jam();
ndbrequire(addfragptr.p->tup2Connectptr != RNIL);
addfragptr.p->tup2Connectptr = RNIL;
break;
case AddFragRecord::TUX_ATTR_WAIT1:
jam();
ndbrequire(addfragptr.p->tux1Connectptr != RNIL);
addfragptr.p->tux1Connectptr = RNIL;
break;
case AddFragRecord::TUX_ATTR_WAIT2:
jam();
ndbrequire(addfragptr.p->tux2Connectptr != RNIL);
addfragptr.p->tux2Connectptr = RNIL;
break;
default:
ndbrequire(false);
break;
}
abortAddFragOps(signal);
const Uint32 Ref = addfragptr.p->dictBlockref;
const Uint32 senderData = addfragptr.p->dictConnectptr;
......
......@@ -504,6 +504,7 @@ struct Fragoperrec {
Uint32 noOfNewAttrCount;
Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
bool inUse;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
......@@ -1936,6 +1937,7 @@ private:
void setUpKeyArray(Tablerec* const regTabPtr);
bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
void abortAddFragOp(Signal* signal);
void releaseTabDescr(Tablerec* const regTabPtr);
void getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr);
......
......@@ -39,11 +39,18 @@
/* ---------------------------------------------------------------- */
void Dbtup::execTUPFRAGREQ(Signal* signal)
{
ljamEntry();
if (signal->theData[0] == (Uint32)-1) {
ljam();
abortAddFragOp(signal);
return;
}
FragoperrecPtr fragOperPtr;
FragrecordPtr regFragPtr;
TablerecPtr regTabPtr;
ljamEntry();
Uint32 userptr = signal->theData[0];
Uint32 userblockref = signal->theData[1];
Uint32 reqinfo = signal->theData[2];
......@@ -132,6 +139,15 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
return;
}//if
if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
ljam();
terrorCode = 1;
fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
CLEAR_ERROR_INSERT_VALUE;
return;
}
if (regTabPtr.p->tableStatus == NOT_DEFINED) {
ljam();
//-------------------------------------------------------------------------------------
......@@ -243,6 +259,7 @@ void Dbtup::seizeFragoperrec(FragoperrecPtr& fragOperPtr)
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
cfirstfreeFragopr = fragOperPtr.p->nextFragoprec;
fragOperPtr.p->nextFragoprec = RNIL;
fragOperPtr.p->inUse = true;
}//Dbtup::seizeFragoperrec()
/* **************************************************************** */
......@@ -273,6 +290,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ndbrequire(fragOperPtr.p->attributeCount > 0);
fragOperPtr.p->attributeCount--;
const bool lastAttr = (fragOperPtr.p->attributeCount == 0);
if ((regTabPtr.p->tableStatus == DEFINING) &&
(fragOperPtr.p->definingFragment)) {
......@@ -346,20 +364,30 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}//if
if ((fragOperPtr.p->attributeCount == 0) &&
(fragOperPtr.p->freeNullBit != 0)) {
if (lastAttr && (fragOperPtr.p->freeNullBit != 0)) {
ljam();
terrorCode = ZINCONSISTENT_NULL_ATTRIBUTE_COUNT;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}//if
}//if
if (ERROR_INSERTED(4009) && regTabPtr.p->fragid[0] == fragId && attrId == 0 ||
ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr ||
ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0 ||
ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr) {
ljam();
terrorCode = 1;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
CLEAR_ERROR_INSERT_VALUE;
return;
}
/* **************************************************************** */
/* ************** TUP_ADD_ATTCONF ****************** */
/* **************************************************************** */
signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, signal, 1, JBB);
if (fragOperPtr.p->attributeCount > 0) {
signal->theData[1] = lastAttr;
sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, signal, 2, JBB);
if (! lastAttr) {
ljam();
return; /* EXIT AND WAIT FOR MORE */
}//if
......@@ -491,11 +519,11 @@ void Dbtup::fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr)
void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
{
fragOperPtr.p->inUse = false;
fragOperPtr.p->nextFragoprec = cfirstfreeFragopr;
cfirstfreeFragopr = fragOperPtr.i;
}//Dbtup::releaseFragoperrec()
void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
......@@ -510,6 +538,20 @@ void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
ndbrequire(false);
}//Dbtup::deleteFragTab()
/*
* LQH aborts on-going create table operation. The table is later
* dropped by DICT.
*/
void Dbtup::abortAddFragOp(Signal* signal)
{
FragoperrecPtr fragOperPtr;
fragOperPtr.i = signal->theData[1];
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
ndbrequire(fragOperPtr.p->inUse);
releaseFragoperrec(fragOperPtr);
}
void
Dbtup::execDROP_TAB_REQ(Signal* signal)
{
......
......@@ -575,6 +575,7 @@ private:
void execDROP_TAB_REQ(Signal* signal);
bool allocDescEnt(IndexPtr indexPtr);
void freeDescEnt(IndexPtr indexPtr);
void abortAddFragOp(Signal* signal);
void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData);
/*
......@@ -684,6 +685,7 @@ private:
friend class NdbOut& operator<<(NdbOut&, const ScanOp&);
friend class NdbOut& operator<<(NdbOut&, const Index&);
friend class NdbOut& operator<<(NdbOut&, const Frag&);
friend class NdbOut& operator<<(NdbOut&, const FragOp&);
friend class NdbOut& operator<<(NdbOut&, const NodeHandle&);
FILE* debugFile;
NdbOut debugOut;
......
......@@ -403,6 +403,19 @@ operator<<(NdbOut& out, const Dbtux::Frag& frag)
return out;
}
NdbOut&
operator<<(NdbOut& out, const Dbtux::FragOp& fragOp)
{
out << "[FragOp " << hex << &fragOp;
out << " [userPtr " << dec << fragOp.m_userPtr << "]";
out << " [indexId " << dec << fragOp.m_indexId << "]";
out << " [fragId " << dec << fragOp.m_fragId << "]";
out << " [fragNo " << dec << fragOp.m_fragNo << "]";
out << " [numAttrsRecvd " << dec << fragOp.m_numAttrsRecvd << "]";
out << "]";
return out;
}
NdbOut&
operator<<(NdbOut& out, const Dbtux::NodeHandle& node)
{
......
......@@ -24,12 +24,7 @@ Dbtux::Dbtux(const Configuration& conf) :
#ifdef VM_TRACE
debugFile(0),
debugOut(*new NullOutputStream()),
// until ndb_mgm supports dump
#ifdef DBTUX_DEBUG_TREE
debugFlags(DebugTree),
#else
debugFlags(0),
#endif
#endif
c_internalStartPhase(0),
c_typeOfStart(NodeState::ST_ILLEGAL_TYPE),
......@@ -86,7 +81,7 @@ Dbtux::execCONTINUEB(Signal* signal)
jamEntry();
const Uint32* data = signal->getDataPtr();
switch (data[0]) {
case TuxContinueB::DropIndex:
case TuxContinueB::DropIndex: // currently unused
{
IndexPtr indexPtr;
c_indexPool.getPtr(indexPtr, data[1]);
......@@ -174,7 +169,7 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
const Uint32 nDescPage = (nIndex + nAttribute + DescPageSize - 1) / DescPageSize;
const Uint32 nDescPage = (nIndex * DescHeadSize + nAttribute * DescAttrSize + DescPageSize - 1) / DescPageSize;
const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4;
c_indexPool.setSize(nIndex);
......
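The old formula under-counted: descriptor pages hold words, an index head costs DescHeadSize words and every attribute DescAttrSize words. The corrected expression is a plain ceiling division; with illustrative (assumed) constants:

  const Uint32 DescPageSize = 256;  /* words per descriptor page, assumed */
  const Uint32 DescHeadSize = 4;    /* words per index head, assumed */
  const Uint32 DescAttrSize = 4;    /* words per attribute, assumed */
  Uint32 words = nIndex * DescHeadSize + nAttribute * DescAttrSize;
  Uint32 nDescPage = (words + DescPageSize - 1) / DescPageSize;  /* ceil */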
......@@ -29,6 +29,11 @@ void
Dbtux::execTUXFRAGREQ(Signal* signal)
{
jamEntry();
if (signal->theData[0] == (Uint32)-1) {
jam();
abortAddFragOp(signal);
return;
}
const TuxFragReq reqCopy = *(const TuxFragReq*)signal->getDataPtr();
const TuxFragReq* const req = &reqCopy;
IndexPtr indexPtr;
......@@ -61,6 +66,11 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
fragOpPtr.p->m_fragId = req->fragId;
fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags;
fragOpPtr.p->m_numAttrsRecvd = 0;
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Seize frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
// check if index has place for more fragments
ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
// seize new fragment record
......@@ -129,6 +139,14 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
debugOut << "Add frag " << fragPtr.i << " " << *fragPtr.p << endl;
}
#endif
// error inserts
if (ERROR_INSERTED(12001) && fragOpPtr.p->m_fragNo == 0 ||
ERROR_INSERTED(12002) && fragOpPtr.p->m_fragNo == 1) {
jam();
errorCode = (TuxFragRef::ErrorCode)1;
CLEAR_ERROR_INSERT_VALUE;
break;
}
// success
TuxFragConf* const conf = (TuxFragConf*)signal->getDataPtrSend();
conf->userPtr = req->userPtr;
......@@ -145,10 +163,18 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
ref->errorCode = errorCode;
sendSignal(req->userRef, GSN_TUXFRAGREF,
signal, TuxFragRef::SignalLength, JBB);
if (fragOpPtr.i != RNIL)
if (fragOpPtr.i != RNIL) {
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release on frag error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
if (indexPtr.i != RNIL)
dropIndex(signal, indexPtr, 0, 0);
}
if (indexPtr.i != RNIL) {
jam();
// let DICT drop the unfinished index
}
}
void
......@@ -203,7 +229,16 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
}
}
#endif
if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) {
const bool lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd);
if (ERROR_INSERTED(12003) && fragOpPtr.p->m_fragNo == 0 && attrId == 0 ||
ERROR_INSERTED(12004) && fragOpPtr.p->m_fragNo == 0 && lastAttr ||
ERROR_INSERTED(12005) && fragOpPtr.p->m_fragNo == 1 && attrId == 0 ||
ERROR_INSERTED(12006) && fragOpPtr.p->m_fragNo == 1 && lastAttr) {
errorCode = (TuxAddAttrRef::ErrorCode)1;
CLEAR_ERROR_INSERT_VALUE;
break;
}
if (lastAttr) {
jam();
// initialize tree header
TreeHead& tree = fragPtr.p->m_tree;
......@@ -246,11 +281,17 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
}
#endif
// fragment is defined
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
}
// success
TuxAddAttrConf* conf = (TuxAddAttrConf*)signal->getDataPtrSend();
conf->userPtr = fragOpPtr.p->m_userPtr;
conf->lastAttr = lastAttr;
sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRCONF,
signal, TuxAddAttrConf::SignalLength, JBB);
return;
......@@ -261,8 +302,32 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
ref->errorCode = errorCode;
sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRREF,
signal, TuxAddAttrRef::SignalLength, JBB);
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release on attr error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
dropIndex(signal, indexPtr, 0, 0);
// let DICT drop the unfinished index
}
/*
* LQH aborts on-going create index operation.
*/
void
Dbtux::abortAddFragOp(Signal* signal)
{
FragOpPtr fragOpPtr;
IndexPtr indexPtr;
c_fragOpPool.getPtr(fragOpPtr, signal->theData[1]);
c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Release on abort frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
}
#endif
c_fragOpPool.release(fragOpPtr);
// let DICT drop the unfinished index
}
/*
......@@ -341,20 +406,13 @@ Dbtux::dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 sen
{
jam();
indexPtr.p->m_state = Index::Dropping;
// drop one fragment at a time
if (indexPtr.p->m_numFrags > 0) {
// drop fragments
while (indexPtr.p->m_numFrags > 0) {
jam();
unsigned i = --indexPtr.p->m_numFrags;
Uint32 i = --indexPtr.p->m_numFrags;
FragPtr fragPtr;
c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
c_fragPool.release(fragPtr);
// the real time break is not used for anything currently
signal->theData[0] = TuxContinueB::DropIndex;
signal->theData[1] = indexPtr.i;
signal->theData[2] = senderRef;
signal->theData[3] = senderData;
sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
return;
}
// drop attributes
if (indexPtr.p->m_descPage != RNIL) {
......
......@@ -193,7 +193,7 @@ extern "C" {
{
return (Ndb_mgmclient_handle) new Ndb_mgmclient(connect_string);
}
int ndb_mgmclient_execute(Ndb_mgmclient_handle h, int argc, const char** argv)
int ndb_mgmclient_execute(Ndb_mgmclient_handle h, int argc, char** argv)
{
return ((Ndb_mgmclient*)h)->execute(argc, argv, 1);
}
......@@ -226,7 +226,7 @@ extern "C" {
#include <util/InputStream.hpp>
#include <util/OutputStream.hpp>
int Ndb_mgmclient::execute(int argc, const char** argv, int _try_reconnect)
int Ndb_mgmclient::execute(int argc, char** argv, int _try_reconnect)
{
if (argc <= 0)
return 0;
......@@ -1386,7 +1386,7 @@ void
CommandInterpreter::executeDumpState(int processId, const char* parameters,
bool all)
{
if(parameters == 0 || strlen(parameters) == 0){
if(emptyString(parameters)){
ndbout << "Expected argument" << endl;
return;
}
......@@ -1806,6 +1806,10 @@ CommandInterpreter::executeEventReporting(int processId,
const char* parameters,
bool all)
{
if (emptyString(parameters)) {
ndbout << "Expected argument" << endl;
return;
}
connect();
BaseString tmp(parameters);
......@@ -1906,6 +1910,7 @@ void
CommandInterpreter::executeAbortBackup(char* parameters)
{
connect();
strtok(parameters, " ");
struct ndb_mgm_reply reply;
char* id = strtok(NULL, "\0");
......
......@@ -23,7 +23,7 @@ extern "C" {
typedef void* Ndb_mgmclient_handle;
Ndb_mgmclient_handle ndb_mgmclient_handle_create(const char *connect_string);
int ndb_mgmclient_execute(Ndb_mgmclient_handle, int argc, const char** argv);
int ndb_mgmclient_execute(Ndb_mgmclient_handle, int argc, char** argv);
int ndb_mgmclient_handle_destroy(Ndb_mgmclient_handle);
#ifdef __cplusplus
......
......@@ -24,7 +24,7 @@ public:
Ndb_mgmclient(const char*);
~Ndb_mgmclient();
int execute(const char *_line, int _try_reconnect=-1);
int execute(int argc, const char** argv, int _try_reconnect=-1);
int execute(int argc, char** argv, int _try_reconnect=-1);
int disconnect();
private:
CommandInterpreter *m_cmd;
......
......@@ -272,7 +272,7 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
CHARSET_INFO* cs = tAttrInfo->m_cs;
if (cs != 0) {
// current limitation: strxfrm does not increase length
assert(cs->strxfrm_multiply == 1);
assert(cs->strxfrm_multiply <= 1);
unsigned n =
(*cs->coll->strnxfrm)(cs,
(uchar*)xfrmData, sizeof(xfrmData),
......
......@@ -142,7 +142,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
CHARSET_INFO* cs = tAttrInfo->m_cs;
if (cs != 0) {
// current limitation: strxfrm does not increase length
assert(cs->strxfrm_multiply == 1);
assert(cs->strxfrm_multiply <= 1);
unsigned n =
(*cs->coll->strnxfrm)(cs,
(uchar*)xfrmData, sizeof(xfrmData),
......
......@@ -1091,7 +1091,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
Uint32 xfrmData[2000];
if (cs != NULL && aValue != NULL) {
// current limitation: strxfrm does not increase length
assert(cs->strxfrm_multiply == 1);
assert(cs->strxfrm_multiply <= 1);
unsigned n =
(*cs->coll->strnxfrm)(cs,
(uchar*)xfrmData, sizeof(xfrmData),
......
......@@ -1479,6 +1479,69 @@ runTestDictionaryPerf(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
int runFailAddFragment(NDBT_Context* ctx, NDBT_Step* step){
static int tuplst[] = { 4007, 4008, 4009, 4010, 4011, 4012 };
static int tuxlst[] = { 12001, 12002, 12003, 12004, 12005, 12006 };
static unsigned tupcnt = sizeof(tuplst)/sizeof(tuplst[0]);
static unsigned tuxcnt = sizeof(tuxlst)/sizeof(tuxlst[0]);
NdbRestarter restarter;
int nodeId = restarter.getMasterNodeId();
Ndb* pNdb = GETNDB(step);
NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
NdbDictionary::Table tab(*ctx->getTab());
tab.setFragmentType(NdbDictionary::Object::FragAllLarge);
// ordered index on first few columns
NdbDictionary::Index idx("X");
idx.setTable(tab.getName());
idx.setType(NdbDictionary::Index::OrderedIndex);
idx.setLogging(false);
for (int i_hate_broken_compilers = 0;
i_hate_broken_compilers < 3 &&
i_hate_broken_compilers < tab.getNoOfColumns();
i_hate_broken_compilers++) {
idx.addColumn(*tab.getColumn(i_hate_broken_compilers));
}
const int loops = ctx->getNumLoops();
int result = NDBT_OK;
(void)pDic->dropTable(tab.getName());
for (int l = 0; l < loops; l++) {
for (unsigned i1 = 0; i1 < tupcnt; i1++) {
unsigned j = (l == 0 ? i1 : myRandom48(tupcnt));
int errval = tuplst[j];
g_info << "insert error node=" << nodeId << " value=" << errval << endl;
CHECK2(restarter.insertErrorInNode(nodeId, errval) == 0,
"failed to set error insert");
CHECK2(pDic->createTable(tab) != 0,
"failed to fail after error insert " << errval);
CHECK2(pDic->createTable(tab) == 0,
pDic->getNdbError());
CHECK2(pDic->dropTable(tab.getName()) == 0,
pDic->getNdbError());
}
for (unsigned i2 = 0; i2 < tuxcnt; i2++) {
unsigned j = (l == 0 ? i2 : myRandom48(tuxcnt));
int errval = tuxlst[j];
g_info << "insert error node=" << nodeId << " value=" << errval << endl;
CHECK2(restarter.insertErrorInNode(nodeId, errval) == 0,
"failed to set error insert");
CHECK2(pDic->createTable(tab) == 0,
pDic->getNdbError());
CHECK2(pDic->createIndex(idx) != 0,
"failed to fail after error insert " << errval);
CHECK2(pDic->createIndex(idx) == 0,
pDic->getNdbError());
CHECK2(pDic->dropTable(tab.getName()) == 0,
pDic->getNdbError());
}
}
end:
return result;
}
NDBT_TESTSUITE(testDict);
TESTCASE("CreateAndDrop",
"Try to create and drop the table loop number of times\n"){
......@@ -1574,6 +1637,10 @@ TESTCASE("DictionaryPerf",
""){
INITIALIZER(runTestDictionaryPerf);
}
TESTCASE("FailAddFragment",
"Fail add fragment or attribute in TUP or TUX\n"){
INITIALIZER(runFailAddFragment);
}
NDBT_TESTSUITE_END(testDict);
int main(int argc, const char** argv){
......
......@@ -26,7 +26,7 @@ ndb_select_all_SOURCES = select_all.cpp \
../test/src/NDBT_ResultRow.cpp \
$(tools_common_sources)
ndb_select_count_SOURCES = select_count.cpp $(tools_common_sources)
ndb_restore_SOURCES = restore/main.cpp \
ndb_restore_SOURCES = restore/restore_main.cpp \
restore/consumer.cpp \
restore/consumer_restore.cpp \
restore/consumer_printer.cpp \
......
......@@ -74,7 +74,7 @@ static struct my_option my_long_options[] =
"No of parallel transactions during restore of data."
"(parallelism can be 1 to 1024)",
(gptr*) &ga_nParallelism, (gptr*) &ga_nParallelism, 0,
GET_INT, REQUIRED_ARG, 128, 0, 0, 0, 0, 0 },
GET_INT, REQUIRED_ARG, 128, 1, 1024, 0, 1, 0 },
{ "print", 256, "Print data and log to stdout",
(gptr*) &_print, (gptr*) &_print, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
......@@ -120,6 +120,20 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case 'V':
print_version();
exit(0);
case 'n':
if (ga_nodeId == 0)
{
printf("Error in --nodeid,-n setting, see --help\n");
exit(1);
}
break;
case 'b':
if (ga_backupId == 0)
{
printf("Error in --backupid,-b setting, see --help\n");
exit(1);
}
break;
case '?':
usage();
exit(0);
......@@ -131,11 +145,8 @@ readArguments(int *pargc, char*** pargv)
{
const char *load_default_groups[]= { "ndb_tools","ndb_restore",0 };
load_defaults("my",load_default_groups,pargc,pargv);
if (handle_options(pargc, pargv, my_long_options, get_one_option) ||
ga_nodeId == 0 ||
ga_backupId == 0 ||
ga_nParallelism < 1 ||
ga_nParallelism >1024) {
if (handle_options(pargc, pargv, my_long_options, get_one_option))
{
exit(1);
}
......@@ -343,7 +354,8 @@ main(int argc, char** argv)
if (res < 0)
{
err << "Restore: An error occured while restoring data. Exiting... res=" << res << endl;
err << "Restore: An error occured while restoring data. Exiting... "
<< "res=" << res << endl;
return -1;
}
......@@ -369,7 +381,8 @@ main(int argc, char** argv)
}
if (res < 0)
{
err << "Restore: An restoring the data log. Exiting... res=" << res << endl;
err << "Restore: An restoring the data log. Exiting... res="
<< res << endl;
return -1;
}
logIter.validateFooter(); //not implemented
......
......@@ -1037,7 +1037,8 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
m_retrieve_all_fields)
m_retrieve_all_fields ||
(field->flags & PRI_KEY_FLAG) && m_retrieve_primary_key)
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(trans->getNdbError());
......@@ -1112,6 +1113,34 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
DBUG_RETURN(0);
}
/*
Peek to check if a particular row already exists
*/
int ha_ndbcluster::peek_row()
{
NdbConnection *trans= m_active_trans;
NdbOperation *op;
THD *thd= current_thd;
DBUG_ENTER("peek_row");
NdbOperation::LockMode lm=
(NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
op->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
int res;
if ((res= set_primary_key(op)))
ERR_RETURN(trans->getNdbError());
if (execute_no_commit_ie(this,trans) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
}
DBUG_RETURN(0);
}
/*
Read one record from NDB using unique secondary index
......@@ -1157,7 +1186,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
(field->flags & PRI_KEY_FLAG))
(field->flags & PRI_KEY_FLAG)) // && m_retrieve_primary_key ??
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
......@@ -1586,7 +1615,7 @@ int ha_ndbcluster::filtered_scan(const byte *key, uint key_len,
Field* field= key_part->field;
uint ndb_fieldnr= key_part->fieldnr-1;
DBUG_PRINT("key_part", ("fieldnr: %d", ndb_fieldnr));
// const NDBCOL *col= tab->getColumn(ndb_fieldnr);
//const NDBCOL *col= ((const NDBTAB *) m_table)->getColumn(ndb_fieldnr);
uint32 field_len= field->pack_length();
DBUG_DUMP("key", (char*)key, field_len);
......@@ -1655,9 +1684,17 @@ int ha_ndbcluster::write_row(byte *record)
int res;
DBUG_ENTER("write_row");
if(m_ignore_dup_key_not_supported)
if(m_ignore_dup_key && table->primary_key != MAX_KEY)
{
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
int peek_res= peek_row();
if (!peek_res)
{
m_dupkey= table->primary_key;
DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
}
if (peek_res != HA_ERR_KEY_NOT_FOUND)
DBUG_RETURN(peek_res);
}
statistic_increment(ha_write_count,&LOCK_status);
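So instead of returning HA_ERR_WRONG_COMMAND for INSERT IGNORE and friends, write_row() now resolves the duplicate itself; condensed control flow of the hunk above:

  int res = peek_row();               // PK read used only as an existence check
  if (res == 0) {                     // the row exists
    m_dupkey = table->primary_key;
    return HA_ERR_FOUND_DUPP_KEY;     // SQL layer can now ignore/update it
  }
  if (res != HA_ERR_KEY_NOT_FOUND)
    return res;                       // a real error, not merely "absent"
  /* row absent: continue with the ordinary insert */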
......@@ -2170,17 +2207,15 @@ void ha_ndbcluster::print_results()
break;
}
case NdbDictionary::Column::Char:{
char buf[field->pack_length()+1];
char *value= (char *) field->ptr;
snprintf(buf, field->pack_length(), "%s", value);
fprintf(DBUG_FILE, "Char\t'%s'", buf);
const char *value= (char *) field->ptr;
fprintf(DBUG_FILE, "Char\t'%.*s'", field->pack_length(), value);
break;
}
case NdbDictionary::Column::Varchar:
case NdbDictionary::Column::Binary:
case NdbDictionary::Column::Varbinary: {
char *value= (char *) field->ptr;
fprintf(DBUG_FILE, "'%s'", value);
const char *value= (char *) field->ptr;
fprintf(DBUG_FILE, "Var\t'%.*s'", field->pack_length(), value);
break;
}
case NdbDictionary::Column::Datetime: {
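The %.*s conversions bound the output by an explicit byte count, so the fixed-width, possibly non NUL-terminated column values print safely without the old on-stack copy; note that the precision argument of %.*s is an int:

  const char *value = (const char *) field->ptr;
  /* print at most pack_length() bytes, terminator not required */
  fprintf(DBUG_FILE, "Char\t'%.*s'", (int) field->pack_length(), value);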
......@@ -2677,15 +2712,15 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
m_use_write= TRUE;
} else
{
if (table->keys)
m_ignore_dup_key_not_supported= TRUE;
DBUG_PRINT("info", ("Ignoring duplicate key"));
m_ignore_dup_key= TRUE;
}
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
m_ignore_dup_key_not_supported= FALSE;
m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
......@@ -2704,6 +2739,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
m_retrieve_primary_key= TRUE;
break;
case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
......@@ -2975,6 +3011,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_ASSERT(m_active_trans);
// Start of transaction
m_retrieve_all_fields= FALSE;
m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
{
NDBDICT *dict= m_ndb->getDictionary();
......@@ -3067,6 +3104,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
// Start of statement
m_retrieve_all_fields= FALSE;
m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
DBUG_RETURN(error);
......@@ -3673,9 +3711,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_PREFIX_CHAR_KEYS),
m_share(0),
m_use_write(FALSE),
m_ignore_dup_key_not_supported(FALSE),
m_ignore_dup_key(FALSE),
m_primary_key_update(FALSE),
m_retrieve_all_fields(FALSE),
m_retrieve_primary_key(FALSE),
m_rows_to_insert(1),
m_rows_inserted(0),
m_bulk_insert_rows(1024),
......
......@@ -163,6 +163,7 @@ class ha_ndbcluster: public handler
int pk_read(const byte *key, uint key_len, byte *buf);
int complemented_pk_read(const byte *old_data, byte *new_data);
int peek_row();
int unique_index_read(const byte *key, uint key_len,
byte *buf);
int ordered_index_scan(const key_range *start_key,
......@@ -222,9 +223,10 @@ class ha_ndbcluster: public handler
typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
bool m_use_write;
bool m_ignore_dup_key_not_supported;
bool m_ignore_dup_key;
bool m_primary_key_update;
bool m_retrieve_all_fields;
bool m_retrieve_primary_key;
ha_rows m_rows_to_insert;
ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows;
......
......@@ -1032,7 +1032,7 @@ bool net_request_file(NET* net, const char* fname)
}
const char *rewrite_db(const char* db, uint *new_len)
const char *rewrite_db(const char* db, uint32 *new_len)
{
if (replicate_rewrite_db.is_empty() || !db)
return db;
......@@ -1043,7 +1043,7 @@ const char *rewrite_db(const char* db, uint *new_len)
{
if (!strcmp(tmp->key, db))
{
*new_len= strlen(tmp->val);
*new_len= (uint32)strlen(tmp->val);
return tmp->val;
}
}
......
......@@ -510,7 +510,7 @@ int add_table_rule(HASH* h, const char* table_spec);
int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
void init_table_rule_hash(HASH* h, bool* h_inited);
void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
const char *rewrite_db(const char* db, uint *new_db_len);
const char *rewrite_db(const char* db, uint32 *new_db_len);
const char *print_slave_db_safe(const char *db);
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code);
void skip_load_data_infile(NET* net);
......
......@@ -453,8 +453,8 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
setup_tables(insert_table_list) ||
setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0) ||
(duplic == DUP_UPDATE &&
(setup_fields(thd, 0, insert_table_list, update_fields, 0, 0, 0) ||
setup_fields(thd, 0, insert_table_list, update_values, 0, 0, 0))))
(setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0) ||
setup_fields(thd, 0, insert_table_list, update_values, 1, 0, 0))))
DBUG_RETURN(-1);
if (find_real_table_in_list(table_list->next,
table_list->db, table_list->real_name))
......@@ -462,6 +462,9 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name);
DBUG_RETURN(-1);
}
if (duplic == DUP_UPDATE || duplic == DUP_REPLACE)
table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
DBUG_RETURN(0);
}
......
......@@ -4123,7 +4123,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length)
- SET uses tot_length.
*/
void calculate_interval_lengths(THD *thd, TYPELIB *interval,
uint *max_length, uint *tot_length)
uint32 *max_length, uint32 *tot_length)
{
const char **pos;
uint *len;
......@@ -4135,7 +4135,7 @@ void calculate_interval_lengths(THD *thd, TYPELIB *interval,
*len= (uint) strip_sp((char*) *pos);
uint length= cs->cset->numchars(cs, *pos, *pos + *len);
*tot_length+= length;
set_if_bigger(*max_length, length);
set_if_bigger(*max_length, (uint32)length);
}
}
......@@ -4454,7 +4454,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
if (new_field->pack_length > 4)
new_field->pack_length=8;
new_field->interval=interval;
uint dummy_max_length;
uint32 dummy_max_length;
calculate_interval_lengths(thd, interval,
&dummy_max_length, &new_field->length);
new_field->length+= (interval->count - 1);
......@@ -4484,7 +4484,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
new_field->interval=interval;
new_field->pack_length=interval->count < 256 ? 1 : 2; // Should be safe
uint dummy_tot_length;
uint32 dummy_tot_length;
calculate_interval_lengths(thd, interval,
&new_field->length, &dummy_tot_length);
set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1);
......
......@@ -635,12 +635,15 @@ static void verify_prepare_field(MYSQL_RES *result,
unsigned long length, const char *def)
{
MYSQL_FIELD *field;
CHARSET_INFO *cs;
if (!(field= mysql_fetch_field_direct(result, no)))
{
fprintf(stdout, "\n *** ERROR: FAILED TO GET THE RESULT ***");
exit(1);
}
cs= get_charset(field->charsetnr, 0);
DIE_UNLESS(cs);
if (!opt_silent)
{
fprintf(stdout, "\n field[%d]:", no);
......@@ -654,7 +657,7 @@ static void verify_prepare_field(MYSQL_RES *result,
field->org_table, org_table);
fprintf(stdout, "\n database :`%s`\t(expected: `%s`)", field->db, db);
fprintf(stdout, "\n length :`%ld`\t(expected: `%ld`)",
field->length, length);
field->length, length * cs->mbmaxlen);
fprintf(stdout, "\n maxlength:`%ld`", field->max_length);
fprintf(stdout, "\n charsetnr:`%d`", field->charsetnr);
fprintf(stdout, "\n default :`%s`\t(expected: `%s`)",
......@@ -663,11 +666,24 @@ static void verify_prepare_field(MYSQL_RES *result,
}
DIE_UNLESS(strcmp(field->name, name) == 0);
DIE_UNLESS(strcmp(field->org_name, org_name) == 0);
DIE_UNLESS(field->type == type);
/*
XXX: silent column specification change works based on number of
bytes a column occupies. So CHAR -> VARCHAR upgrade is possible even
for CHAR(2) column if its character set is multibyte.
VARCHAR -> CHAR downgrade won't work for VARCHAR(3) as one would
expect.
*/
if (cs->mbmaxlen == 1)
DIE_UNLESS(field->type == type);
DIE_UNLESS(strcmp(field->table, table) == 0);
DIE_UNLESS(strcmp(field->org_table, org_table) == 0);
DIE_UNLESS(strcmp(field->db, db) == 0);
DIE_UNLESS(field->length == length);
/*
Character set should be taken into account for multibyte encodings, such
as utf8. Field length is calculated as number of characters * maximum
number of bytes a character can occupy.
*/
DIE_UNLESS(field->length == length * cs->mbmaxlen);
if (def)
DIE_UNLESS(strcmp(field->def, def) == 0);
}
......
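A worked instance of the new check, assuming a utf8 column (mbmaxlen = 3): a CHAR(2) field is created with a metadata length of 2 characters, but the client-visible field->length is reported in bytes, hence 2 * 3 = 6 and the comparison must scale by cs->mbmaxlen:

  CHARSET_INFO *cs = get_charset(field->charsetnr, 0);  /* e.g. utf8 */
  DIE_UNLESS(field->length == length * cs->mbmaxlen);   /* 2 chars * 3 = 6 */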