Commit 56fa7bfd authored by pekka@mysql.com

Merge

parents 0d56d7ff f14d9c67
drop table if exists t1;
set autocommit=0;
create table t1 (
a int not null primary key,
b text not null,
c int not null,
d longblob,
key (c)
) engine=ndbcluster;
set @x0 = '01234567012345670123456701234567';
set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
set @b1 = 'b1';
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@x0);
set @d1 = 'dd1';
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
set @b2 = 'b2';
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @d2 = 'dd2';
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
select length(@x0),length(@b1),length(@d1) from dual;
length(@x0) length(@b1) length(@d1)
256 2256 3000
select length(@x0),length(@b2),length(@d2) from dual;
length(@x0) length(@b2) length(@d2)
256 20000 30000
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where a = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=1;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
1 2256 b1 3000 dd1
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=2;
a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
2 20000 b2 30000 dd2
update t1 set b=@b2,d=@d2 where a=1;
update t1 set b=@b1,d=@d1 where a=2;
commit;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=1;
a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
1 20000 b2 30000 dd2
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=2;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
2 2256 b1 3000 dd1
update t1 set b=concat(b,b),d=concat(d,d) where a=1;
update t1 set b=concat(b,b),d=concat(d,d) where a=2;
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 where a=1;
a length(b) substr(b,1+4*9000,2) length(d) substr(d,1+6*9000,3)
1 40000 b2 60000 dd2
select a,length(b),substr(b,1+4*900,2),length(d),substr(d,1+6*900,3)
from t1 where a=2;
a length(b) substr(b,1+4*900,2) length(d) substr(d,1+6*900,3)
2 4512 b1 6000 dd1
update t1 set d=null where a=1;
commit;
select a from t1 where d is null;
a
1
delete from t1 where a=1;
delete from t1 where a=2;
commit;
select count(*) from t1;
count(*)
0
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where c = 111;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref c c 4 const 10 Using where
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c=111;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
1 2256 b1 3000 dd1
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where c=222;
a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
2 20000 b2 30000 dd2
update t1 set b=@b2,d=@d2 where c=111;
update t1 set b=@b1,d=@d1 where c=222;
commit;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where c=111;
a length(b) substr(b,1+2*9000,2) length(d) substr(d,1+3*9000,3)
1 20000 b2 30000 dd2
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c=222;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
2 2256 b1 3000 dd1
update t1 set d=null where c=111;
commit;
select a from t1 where d is null;
a
1
delete from t1 where c=111;
delete from t1 where c=222;
commit;
select count(*) from t1;
count(*)
0
insert into t1 values(1,'b1',111,'dd1');
insert into t1 values(2,'b2',222,'dd2');
insert into t1 values(3,'b3',333,'dd3');
insert into t1 values(4,'b4',444,'dd4');
insert into t1 values(5,'b5',555,'dd5');
insert into t1 values(6,'b6',666,'dd6');
insert into t1 values(7,'b7',777,'dd7');
insert into t1 values(8,'b8',888,'dd8');
insert into t1 values(9,'b9',999,'dd9');
commit;
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100
select * from t1 order by a;
a b c d
1 b1 111 dd1
2 b2 222 dd2
3 b3 333 dd3
4 b4 444 dd4
5 b5 555 dd5
6 b6 666 dd6
7 b7 777 dd7
8 b8 888 dd8
9 b9 999 dd9
update t1 set b=concat(a,'x',b),d=concat(a,'x',d);
commit;
select * from t1 order by a;
a b c d
1 1xb1 111 1xdd1
2 2xb2 222 2xdd2
3 3xb3 333 3xdd3
4 4xb4 444 4xdd4
5 5xb5 555 5xdd5
6 6xb6 666 6xdd6
7 7xb7 777 7xdd7
8 8xb8 888 8xdd8
9 9xb9 999 9xdd9
delete from t1;
commit;
select count(*) from t1;
count(*)
0
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 100
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 order by a;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
1 2256 b1 3000 dd1
2 20000 b2 30000 dd2
update t1 set b=concat(b,b),d=concat(d,d);
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 order by a;
a length(b) substr(b,1+4*9000,2) length(d) substr(d,1+6*9000,3)
1 4512 6000
2 40000 b2 60000 dd2
delete from t1;
commit;
select count(*) from t1;
count(*)
0
insert into t1 values(1,'b1',111,'dd1');
insert into t1 values(2,'b2',222,'dd2');
insert into t1 values(3,'b3',333,'dd3');
insert into t1 values(4,'b4',444,'dd4');
insert into t1 values(5,'b5',555,'dd5');
insert into t1 values(6,'b6',666,'dd6');
insert into t1 values(7,'b7',777,'dd7');
insert into t1 values(8,'b8',888,'dd8');
insert into t1 values(9,'b9',999,'dd9');
commit;
explain select * from t1 where c >= 100 order by a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL 10 Using where; Using filesort
select * from t1 where c >= 100 order by a;
a b c d
1 b1 111 dd1
2 b2 222 dd2
3 b3 333 dd3
4 b4 444 dd4
5 b5 555 dd5
6 b6 666 dd6
7 b7 777 dd7
8 b8 888 dd8
9 b9 999 dd9
update t1 set b=concat(a,'x',b),d=concat(a,'x',d)
where c >= 100;
commit;
select * from t1 where c >= 100 order by a;
a b c d
1 1xb1 111 1xdd1
2 2xb2 222 2xdd2
3 3xb3 333 3xdd3
4 4xb4 444 4xdd4
5 5xb5 555 5xdd5
6 6xb6 666 6xdd6
7 7xb7 777 7xdd7
8 8xb8 888 8xdd8
9 9xb9 999 9xdd9
delete from t1 where c >= 100;
commit;
select count(*) from t1;
count(*)
0
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where c >= 100 order by a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL 10 Using where; Using filesort
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c >= 100 order by a;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
1 2256 b1 3000 dd1
2 20000 b2 30000 dd2
update t1 set b=concat(b,b),d=concat(d,d);
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 where c >= 100 order by a;
a length(b) substr(b,1+4*9000,2) length(d) substr(d,1+6*9000,3)
1 4512 6000
2 40000 b2 60000 dd2
delete from t1 where c >= 100;
commit;
select count(*) from t1;
count(*)
0
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 0;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 1;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
1 2256 b1 3000 dd1
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 2;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
2 20000 b2 30000 dd2
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 order by a;
a length(b) substr(b,1+2*900,2) length(d) substr(d,1+3*900,3)
1 2256 b1 3000 dd1
2 20000 b2 30000 dd2
rollback;
select count(*) from t1;
count(*)
0
--source include/have_ndb.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
#
# Minimal NDB blobs test.
#
# On NDB API level there is an extensive test program "testBlobs".
# A prerequisite for this handler test is that "testBlobs" succeeds.
#
# make test harder with autocommit off
set autocommit=0;
create table t1 (
a int not null primary key,
b text not null,
c int not null,
d longblob,
key (c)
) engine=ndbcluster;
# -- values --
# x0 size 256 (current inline size)
set @x0 = '01234567012345670123456701234567';
set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
# b1 length 2000+256 (blob part aligned)
set @b1 = 'b1';
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
set @b1 = concat(@b1,@x0);
# d1 length 3000
set @d1 = 'dd1';
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
# b2 length 20000
set @b2 = 'b2';
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
# d2 length 30000
set @d2 = 'dd2';
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
select length(@x0),length(@b1),length(@d1) from dual;
select length(@x0),length(@b2),length(@d2) from dual;
# -- pk ops --
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where a = 1;
# pk read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=1;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=2;
# pk update
update t1 set b=@b2,d=@d2 where a=1;
update t1 set b=@b1,d=@d1 where a=2;
commit;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where a=1;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a=2;
# pk update
update t1 set b=concat(b,b),d=concat(d,d) where a=1;
update t1 set b=concat(b,b),d=concat(d,d) where a=2;
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 where a=1;
select a,length(b),substr(b,1+4*900,2),length(d),substr(d,1+6*900,3)
from t1 where a=2;
# pk update to null
update t1 set d=null where a=1;
commit;
select a from t1 where d is null;
# pk delete
delete from t1 where a=1;
delete from t1 where a=2;
commit;
select count(*) from t1;
# -- hash index ops --
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where c = 111;
# hash key read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c=111;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where c=222;
# hash key update
update t1 set b=@b2,d=@d2 where c=111;
update t1 set b=@b1,d=@d1 where c=222;
commit;
select a,length(b),substr(b,1+2*9000,2),length(d),substr(d,1+3*9000,3)
from t1 where c=111;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c=222;
# hash key update to null
update t1 set d=null where c=111;
commit;
select a from t1 where d is null;
# hash key delete
delete from t1 where c=111;
delete from t1 where c=222;
commit;
select count(*) from t1;
# -- table scan ops, short values --
insert into t1 values(1,'b1',111,'dd1');
insert into t1 values(2,'b2',222,'dd2');
insert into t1 values(3,'b3',333,'dd3');
insert into t1 values(4,'b4',444,'dd4');
insert into t1 values(5,'b5',555,'dd5');
insert into t1 values(6,'b6',666,'dd6');
insert into t1 values(7,'b7',777,'dd7');
insert into t1 values(8,'b8',888,'dd8');
insert into t1 values(9,'b9',999,'dd9');
commit;
explain select * from t1;
# table scan read
select * from t1 order by a;
# table scan update
update t1 set b=concat(a,'x',b),d=concat(a,'x',d);
commit;
select * from t1 order by a;
# table scan delete
delete from t1;
commit;
select count(*) from t1;
# -- table scan ops, long values --
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1;
# table scan read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 order by a;
# table scan update
update t1 set b=concat(b,b),d=concat(d,d);
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 order by a;
# table scan delete
delete from t1;
commit;
select count(*) from t1;
# -- range scan ops, short values --
insert into t1 values(1,'b1',111,'dd1');
insert into t1 values(2,'b2',222,'dd2');
insert into t1 values(3,'b3',333,'dd3');
insert into t1 values(4,'b4',444,'dd4');
insert into t1 values(5,'b5',555,'dd5');
insert into t1 values(6,'b6',666,'dd6');
insert into t1 values(7,'b7',777,'dd7');
insert into t1 values(8,'b8',888,'dd8');
insert into t1 values(9,'b9',999,'dd9');
commit;
explain select * from t1 where c >= 100 order by a;
# range scan read
select * from t1 where c >= 100 order by a;
# range scan update
update t1 set b=concat(a,'x',b),d=concat(a,'x',d)
where c >= 100;
commit;
select * from t1 where c >= 100 order by a;
# range scan delete
delete from t1 where c >= 100;
commit;
select count(*) from t1;
# -- range scan ops, long values --
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
commit;
explain select * from t1 where c >= 100 order by a;
# range scan read
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where c >= 100 order by a;
# range scan update
update t1 set b=concat(b,b),d=concat(d,d);
commit;
select a,length(b),substr(b,1+4*9000,2),length(d),substr(d,1+6*9000,3)
from t1 where c >= 100 order by a;
# range scan delete
delete from t1 where c >= 100;
commit;
select count(*) from t1;
# -- rollback --
insert into t1 values(1,@b1,111,@d1);
insert into t1 values(2,@b2,222,@d2);
# 626
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 0;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 1;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 where a = 2;
select a,length(b),substr(b,1+2*900,2),length(d),substr(d,1+3*900,3)
from t1 order by a;
rollback;
select count(*) from t1;
--drop table t1;
@@ -311,7 +311,7 @@ public:
     ExtDatetime = NdbSqlUtil::Type::Datetime,
     ExtTimespec = NdbSqlUtil::Type::Timespec,
     ExtBlob = NdbSqlUtil::Type::Blob,
-    ExtClob = NdbSqlUtil::Type::Clob
+    ExtText = NdbSqlUtil::Type::Text
  };
  // Attribute data interpretation
@@ -435,7 +435,7 @@ public:
      AttributeArraySize = 12 * AttributeExtLength;
      return true;
    case DictTabInfo::ExtBlob:
-    case DictTabInfo::ExtClob:
+    case DictTabInfo::ExtText:
      AttributeType = DictTabInfo::StringType;
      AttributeSize = DictTabInfo::an8Bit;
      // head + inline part [ attr precision ]
...
@@ -50,24 +50,33 @@ class NdbColumnImpl;
 * - closed: after transaction commit
 * - invalid: after rollback or transaction close
 *
- * NdbBlob supports 2 styles of data access:
+ * NdbBlob supports 3 styles of data access:
 *
 * - in prepare phase, NdbBlob methods getValue and setValue are used to
- *   prepare a read or write of a single blob value of known size
+ *   prepare a read or write of a blob value of known size
 *
- * - in active phase, NdbBlob methods readData and writeData are used to
- *   read or write blob data of undetermined size
+ * - in prepare phase, setActiveHook is used to define a routine which
+ *   is invoked as soon as the handle becomes active
+ *
+ * - in active phase, readData and writeData are used to read or write
+ *   blob data of arbitrary size
+ *
+ * The styles can be applied in combination (in above order).
+ *
+ * Blob operations take effect at next transaction execute. In some
+ * cases NdbBlob is forced to do implicit executes. To avoid this,
+ * operate on complete blob parts.
+ *
+ * Use NdbConnection::executePendingBlobOps to flush your reads and
+ * writes. It avoids execute penalty if nothing is pending. It is not
+ * needed after execute (obviously) or after next scan result.
 *
 * NdbBlob methods return -1 on error and 0 on success, and use output
 * parameters when necessary.
 *
 * Notes:
 * - table and its blob part tables are not created atomically
- * - blob data operations take effect at next transaction execute
- * - NdbBlob may need to do implicit executes on the transaction
- * - read and write of complete parts is much more efficient
 * - scan must use the "new" interface NdbScanOperation
- * - scan with blobs applies hold-read-lock (at minimum)
 * - to update a blob in a read op requires exclusive tuple lock
 * - update op in scan must do its own getBlobHandle
 * - delete creates implicit, not-accessible blob handles
@@ -78,12 +87,16 @@ class NdbColumnImpl;
 * - scan must use exclusive locking for now
 *
 * Todo:
- * - add scan method hold-read-lock-until-next + return-keyinfo
- * - better check of keyinfo length when setting keys
- * - better check of allowed blob op vs locking mode
+ * - add scan method hold-read-lock + return-keyinfo
+ * - check keyinfo length when setting keys
+ * - check allowed blob ops vs locking mode
+ * - overload control (too many pending ops)
 */
class NdbBlob {
public:
+  /**
+   * State.
+   */
  enum State {
    Idle = 0,
    Prepared = 1,
@@ -92,9 +105,15 @@ public:
    Invalid = 9
  };
  State getState();
+  /**
+   * Inline blob header.
+   */
+  struct Head {
+    Uint64 length;
+  };
  /**
   * Prepare to read blob value. The value is available after execute.
-   * Use isNull to check for NULL and getLength to get the real length
+   * Use getNull to check for NULL and getLength to get the real length
   * and to check for truncation. Sets current read/write position to
   * after the data read.
   */
@@ -106,6 +125,20 @@ public:
   * data to null pointer (0) to create a NULL value.
   */
  int setValue(const void* data, Uint32 bytes);
+  /**
+   * Callback for setActiveHook. Invoked immediately when the prepared
+   * operation has been executed (but not committed). Any getValue or
+   * setValue is done first. The blob handle is active so readData or
+   * writeData etc can be used to manipulate blob value. A user-defined
+   * argument is passed along. Returns non-zero on error.
+   */
+  typedef int ActiveHook(NdbBlob* me, void* arg);
+  /**
+   * Define callback for blob handle activation. The queue of prepared
+   * operations will be executed in no commit mode up to this point and
+   * then the callback is invoked.
+   */
+  int setActiveHook(ActiveHook* activeHook, void* arg);
  /**
   * Check if blob is null.
   */
@@ -115,7 +148,7 @@ public:
   */
  int setNull();
  /**
-   * Get current length in bytes. Use isNull to distinguish between
+   * Get current length in bytes. Use getNull to distinguish between
   * length 0 blob and NULL blob.
   */
  int getLength(Uint64& length);
@@ -180,6 +213,13 @@ public:
  static const int ErrAbort = 4268;
  // "Unknown blob error"
  static const int ErrUnknown = 4269;
+  /**
+   * Return info about all blobs in this operation.
+   */
+  // Get first blob in list
+  NdbBlob* blobsFirstBlob();
+  // Get next blob in list after this one
+  NdbBlob* blobsNextBlob();
private:
  friend class Ndb;
@@ -214,10 +254,11 @@ private:
  bool theSetFlag;
  const char* theSetBuf;
  Uint32 theGetSetBytes;
-  // head
-  struct Head {
-    Uint64 length;
-  };
+  // pending ops
+  Uint8 thePendingBlobOps;
+  // activation callback
+  ActiveHook* theActiveHook;
+  void* theActiveHookArg;
  // buffers
  struct Buf {
    char* data;
@@ -235,7 +276,6 @@ private:
  char* theInlineData;
  NdbRecAttr* theHeadInlineRecAttr;
  bool theHeadInlineUpdateFlag;
-  bool theNewPartFlag;
  // length and read/write position
  int theNullFlag;
  Uint64 theLength;
@@ -276,6 +316,11 @@ private:
  int insertParts(const char* buf, Uint32 part, Uint32 count);
  int updateParts(const char* buf, Uint32 part, Uint32 count);
  int deleteParts(Uint32 part, Uint32 count);
+  // pending ops
+  int executePendingBlobReads();
+  int executePendingBlobWrites();
+  // callbacks
+  int invokeActiveHook();
  // blob handle maintenance
  int atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn);
  int preExecute(ExecType anExecType, bool& batch);
@@ -287,6 +332,7 @@ private:
  void setErrorCode(NdbOperation* anOp, bool invalidFlag = true);
  void setErrorCode(NdbConnection* aCon, bool invalidFlag = true);
#ifdef VM_TRACE
+  int getOperationType() const;
  friend class NdbOut& operator<<(NdbOut&, const NdbBlob&);
#endif
};
...
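Editor's aside, not part of the commit: a minimal sketch of the prepare-phase access style described in the header comment above, run against the t1 table used by the test. It assumes the classic NDB API calls of this era (Ndb::startTransaction, NdbConnection::getNdbOperation, readTuple, equal, getBlobHandle, execute) and the NdbBlob getValue/getLength signatures shown in the header; the buffer size and key value are arbitrary and error handling is abbreviated.

#include <NdbApi.hpp>

// Sketch: read the row with primary key a=1 and fetch blob column "d" of "t1".
int read_blob_sketch(Ndb* ndb)
{
  NdbConnection* trans = ndb->startTransaction();
  NdbOperation* op = trans->getNdbOperation("t1");
  op->readTuple();
  op->equal("a", 1);

  NdbBlob* bh = op->getBlobHandle("d");

  // Style 1: prepare a read of known size into a user buffer.
  char buf[3000];
  bh->getValue(buf, sizeof(buf));

  // The blob read takes effect at execute (see the header comment above).
  if (trans->execute(NoCommit) == -1)
    return -1;

  Uint64 len = 0;
  bh->getLength(len);   // real length; compare with sizeof(buf) to detect truncation

  ndb->closeTransaction(trans);
  return 0;
}

In the active phase (after the execute) the same handle could continue with readData/writeData for data of arbitrary size, as the comment describes.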
@@ -431,6 +431,15 @@ public:
  /** @} *********************************************************************/
+  /**
+   * Execute the transaction in NoCommit mode if there are any not-yet
+   * executed blob part operations of given types. Otherwise do
+   * nothing. The flags argument is bitwise OR of (1 << optype) where
+   * optype comes from NdbOperation::OperationType. Only the basic PK
+   * ops are used (read, insert, update, delete).
+   */
+  int executePendingBlobOps(Uint8 flags = 0xFF);
private:
  /**
   * Release completed operations
@@ -642,6 +651,7 @@ private:
  Uint32 theBuddyConPtr;
  // optim: any blobs
  bool theBlobFlag;
+  Uint8 thePendingBlobOps;
  static void sendTC_COMMIT_ACK(NdbApiSignal *,
                                Uint32 transId1, Uint32 transId2,
@@ -869,6 +879,21 @@ NdbConnection::OpSent()
  theNoOfOpSent++;
}
+/******************************************************************************
+void executePendingBlobOps();
+******************************************************************************/
+#include <stdlib.h>
+inline
+int
+NdbConnection::executePendingBlobOps(Uint8 flags)
+{
+  if (thePendingBlobOps & flags) {
+    // not executeNoBlobs because there can be new ops with blobs
+    return execute(NoCommit);
+  }
+  return 0;
+}
inline
Uint32
NdbConnection::ptr2int(){
@@ -876,5 +901,3 @@ NdbConnection::ptr2int(){
}
#endif
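Hedged illustration only (not from the commit): given the flags semantics documented above, a caller holding pending blob reads could flush just those before touching its buffers. The flag value assumes NdbOperation::ReadRequest is the operation-type enumerator for primary-key reads; check NdbOperation.hpp for the exact names.

// Sketch: flush only pending blob *reads* on an open transaction.
int flush_blob_reads(NdbConnection* trans)
{
  const Uint8 readFlag = (Uint8)(1 << NdbOperation::ReadRequest);  // assumed enumerator
  if (trans->executePendingBlobOps(readFlag) == -1)
    return -1;   // the implicit execute(NoCommit) failed
  return 0;
}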
@@ -183,7 +183,7 @@ public:
    Datetime,        ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
    Timespec,        ///< Precision down to 1 nsec(sizeof(Datetime) == 12 bytes )
    Blob,            ///< Binary large object (see NdbBlob)
-    Clob             ///< Text blob
+    Text             ///< Text blob
  };
  /**
@@ -309,7 +309,8 @@ public:
  /**
   * For blob, set or get "part size" i.e. number of bytes to store in
-   * each tuple of the "blob table". Must be less than 64k.
+   * each tuple of the "blob table". Can be set to zero to omit parts
+   * and to allow only inline bytes ("tinyblob").
   */
  void setPartSize(int size) { setScale(size); }
  int getPartSize() const { return getScale(); }
@@ -1060,6 +1061,6 @@ public:
  };
};
-class NdbOut& operator <<(class NdbOut& ndbout, const NdbDictionary::Column::Type type);
+class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Column& col);
#endif
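A small illustrative sketch (not in the commit) of the new part-size semantics documented above: a Text column with part size zero keeps the whole value inline, so no blob part table is needed (the createBlobTables/dropBlobTables hunks further down skip such columns). The setter names are the usual NdbDictionary::Column ones; the sizes are arbitrary examples.

// Sketch: an inline-only ("tinyblob"-style) Text column.
void define_inline_text_column(NdbDictionary::Table& tab)
{
  NdbDictionary::Column col("t");
  col.setType(NdbDictionary::Column::Text);
  col.setInlineSize(256);   // bytes stored in the main tuple
  col.setPartSize(0);       // zero: no blob part table, value limited to the inline bytes
  col.setStripeSize(0);
  col.setNullable(true);
  tab.addColumn(col);
}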
@@ -80,7 +80,7 @@ public:
    Datetime, // Precision down to 1 sec (size 8 bytes)
    Timespec, // Precision down to 1 nsec (size 12 bytes)
    Blob,     // Blob
-    Clob      // Text blob
+    Text      // Text blob
  };
  Enum m_typeId;
  Cmp* m_cmp; // set to NULL if cmp not implemented
@@ -125,7 +125,7 @@ private:
  static Cmp cmpDatetime;
  static Cmp cmpTimespec;
  static Cmp cmpBlob;
-  static Cmp cmpClob;
+  static Cmp cmpText;
};
inline int
@@ -344,17 +344,15 @@ NdbSqlUtil::cmp(Uint32 typeId, const Uint32* p1, const Uint32* p2, Uint32 full,
    break;
  case Type::Blob:      // XXX fix
    break;
-  case Type::Clob:
+  case Type::Text:
    {
-      // skip blob head, the rest is varchar
+      // skip blob head, the rest is char
      const unsigned skip = NDB_BLOB_HEAD_SIZE;
      if (size >= skip + 1) {
        union { const Uint32* p; const char* v; } u1, u2;
        u1.p = p1 + skip;
        u2.p = p2 + skip;
-        // length in first 2 bytes
-        int k = strncmp(u1.v + 2, u2.v + 2, ((size - skip) << 2) - 2);
-        return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+        // TODO
      }
      return CmpUnknown;
    }
...
@@ -161,8 +161,8 @@ NdbSqlUtil::m_typeList[] = {
    NULL // cmpDatetime
  },
  {
-    Type::Clob,
-    cmpClob
+    Type::Text,
+    cmpText
  }
};
@@ -299,9 +299,9 @@ NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
}
int
-NdbSqlUtil::cmpClob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
-  return cmp(Type::Clob, p1, p2, full, size);
+  return cmp(Type::Text, p1, p2, full, size);
}
#ifdef NDB_SQL_UTIL_TEST
...
This diff is collapsed.
@@ -89,7 +89,8 @@ NdbConnection::NdbConnection( Ndb* aNdb ) :
  // Scan operations
  theScanningOp(NULL),
  theBuddyConPtr(0xFFFFFFFF),
-  theBlobFlag(false)
+  theBlobFlag(false),
+  thePendingBlobOps(0)
{
  theListState = NotInList;
  theError.code = 0;
@@ -150,6 +151,7 @@ NdbConnection::init()
  theBuddyConPtr = 0xFFFFFFFF;
  //
  theBlobFlag = false;
+  thePendingBlobOps = 0;
}//NdbConnection::init()
/*****************************************************************************
@@ -269,26 +271,34 @@ NdbConnection::execute(ExecType aTypeOfExec,
  if (! theBlobFlag)
    return executeNoBlobs(aTypeOfExec, abortOption, forceSend);
-  // execute prepared ops in batches, as requested by blobs
+  /*
+   * execute prepared ops in batches, as requested by blobs
+   * - blob error does not terminate execution
+   * - blob error sets error on operation
+   * - if error on operation skip blob calls
+   */
  ExecType tExecType;
  NdbOperation* tPrepOp;
+  int ret = 0;
  do {
    tExecType = aTypeOfExec;
    tPrepOp = theFirstOpInList;
    while (tPrepOp != NULL) {
-      bool batch = false;
-      NdbBlob* tBlob = tPrepOp->theBlobList;
-      while (tBlob != NULL) {
-        if (tBlob->preExecute(tExecType, batch) == -1)
-          return -1;
-        tBlob = tBlob->theNext;
-      }
-      if (batch) {
-        // blob asked to execute all up to here now
-        tExecType = NoCommit;
-        break;
+      if (tPrepOp->theError.code == 0) {
+        bool batch = false;
+        NdbBlob* tBlob = tPrepOp->theBlobList;
+        while (tBlob != NULL) {
+          if (tBlob->preExecute(tExecType, batch) == -1)
+            ret = -1;
+          tBlob = tBlob->theNext;
+        }
+        if (batch) {
+          // blob asked to execute all up to here now
+          tExecType = NoCommit;
+          break;
+        }
      }
      tPrepOp = tPrepOp->next();
    }
@@ -304,26 +314,30 @@ NdbConnection::execute(ExecType aTypeOfExec,
    if (tExecType == Commit) {
      NdbOperation* tOp = theCompletedFirstOp;
      while (tOp != NULL) {
-        NdbBlob* tBlob = tOp->theBlobList;
-        while (tBlob != NULL) {
-          if (tBlob->preCommit() == -1)
-            return -1;
-          tBlob = tBlob->theNext;
+        if (tOp->theError.code == 0) {
+          NdbBlob* tBlob = tOp->theBlobList;
+          while (tBlob != NULL) {
+            if (tBlob->preCommit() == -1)
+              ret = -1;
+            tBlob = tBlob->theNext;
+          }
        }
        tOp = tOp->next();
      }
    }
    if (executeNoBlobs(tExecType, abortOption, forceSend) == -1)
-      return -1;
+      ret = -1;
    {
      NdbOperation* tOp = theCompletedFirstOp;
      while (tOp != NULL) {
-        NdbBlob* tBlob = tOp->theBlobList;
-        while (tBlob != NULL) {
-          // may add new operations if batch
-          if (tBlob->postExecute(tExecType) == -1)
-            return -1;
-          tBlob = tBlob->theNext;
+        if (tOp->theError.code == 0) {
+          NdbBlob* tBlob = tOp->theBlobList;
+          while (tBlob != NULL) {
+            // may add new operations if batch
+            if (tBlob->postExecute(tExecType) == -1)
+              ret = -1;
+            tBlob = tBlob->theNext;
+          }
        }
        tOp = tOp->next();
      }
@@ -338,7 +352,7 @@ NdbConnection::execute(ExecType aTypeOfExec,
    }
  } while (theFirstOpInList != NULL || tExecType != aTypeOfExec);
-  return 0;
+  return ret;
}
int
@@ -397,6 +411,7 @@ NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
      break;
    }
  }
+  thePendingBlobOps = 0;
  return 0;
}//NdbConnection::execute()
...
@@ -806,73 +806,90 @@ NdbDictionary::Dictionary::getNdbError() const {
  return m_impl.getNdbError();
}
-NdbOut& operator <<(NdbOut& ndbout, const NdbDictionary::Column::Type type)
+// printers
+NdbOut&
+operator<<(NdbOut& out, const NdbDictionary::Column& col)
{
-  switch(type){
-  case NdbDictionary::Column::Bigunsigned:
-    ndbout << "Bigunsigned";
+  out << col.getName() << " ";
+  switch (col.getType()) {
+  case NdbDictionary::Column::Tinyint:
+    out << "Tinyint";
    break;
-  case NdbDictionary::Column::Unsigned:
-    ndbout << "Unsigned";
+  case NdbDictionary::Column::Tinyunsigned:
+    out << "Tinyunsigned";
+    break;
+  case NdbDictionary::Column::Smallint:
+    out << "Smallint";
    break;
  case NdbDictionary::Column::Smallunsigned:
-    ndbout << "Smallunsigned";
+    out << "Smallunsigned";
    break;
-  case NdbDictionary::Column::Tinyunsigned:
-    ndbout << "Tinyunsigned";
+  case NdbDictionary::Column::Mediumint:
+    out << "Mediumint";
    break;
-  case NdbDictionary::Column::Bigint:
-    ndbout << "Bigint";
+  case NdbDictionary::Column::Mediumunsigned:
+    out << "Mediumunsigned";
    break;
  case NdbDictionary::Column::Int:
-    ndbout << "Int";
+    out << "Int";
    break;
-  case NdbDictionary::Column::Smallint:
-    ndbout << "Smallint";
-    break;
-  case NdbDictionary::Column::Tinyint:
-    ndbout << "Tinyint";
-    break;
-  case NdbDictionary::Column::Char:
-    ndbout << "Char";
+  case NdbDictionary::Column::Unsigned:
+    out << "Unsigned";
+    break;
+  case NdbDictionary::Column::Bigint:
+    out << "Bigint";
    break;
-  case NdbDictionary::Column::Varchar:
-    ndbout << "Varchar";
+  case NdbDictionary::Column::Bigunsigned:
+    out << "Bigunsigned";
    break;
  case NdbDictionary::Column::Float:
-    ndbout << "Float";
+    out << "Float";
    break;
  case NdbDictionary::Column::Double:
-    ndbout << "Double";
+    out << "Double";
    break;
-  case NdbDictionary::Column::Mediumint:
-    ndbout << "Mediumint";
+  case NdbDictionary::Column::Decimal:
+    out << "Decimal(" << col.getScale() << "," << col.getPrecision() << ")";
    break;
-  case NdbDictionary::Column::Mediumunsigned:
-    ndbout << "Mediumunsigend";
+  case NdbDictionary::Column::Char:
+    out << "Char(" << col.getLength() << ")";
+    break;
+  case NdbDictionary::Column::Varchar:
+    out << "Varchar(" << col.getLength() << ")";
    break;
  case NdbDictionary::Column::Binary:
-    ndbout << "Binary";
+    out << "Binary(" << col.getLength() << ")";
    break;
  case NdbDictionary::Column::Varbinary:
-    ndbout << "Varbinary";
+    out << "Varbinary(" << col.getLength() << ")";
    break;
-  case NdbDictionary::Column::Decimal:
-    ndbout << "Decimal";
+  case NdbDictionary::Column::Datetime:
+    out << "Datetime";
    break;
  case NdbDictionary::Column::Timespec:
-    ndbout << "Timespec";
+    out << "Timespec";
    break;
  case NdbDictionary::Column::Blob:
-    ndbout << "Blob";
+    out << "Blob(" << col.getInlineSize() << "," << col.getPartSize()
+        << ";" << col.getStripeSize() << ")";
+    break;
+  case NdbDictionary::Column::Text:
+    out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
+        << ";" << col.getStripeSize() << ")";
    break;
  case NdbDictionary::Column::Undefined:
-    ndbout << "Undefined";
+    out << "Undefined";
    break;
  default:
-    ndbout << "Unknown type=" << (Uint32)type;
+    out << "Type" << (Uint32)col.getType();
    break;
  }
-  return ndbout;
+  if (col.getPrimaryKey())
+    out << " PRIMARY KEY";
+  else if (! col.getNullable())
+    out << " NOT NULL";
+  else
+    out << " NULL";
+  return out;
}
@@ -181,7 +181,7 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
  case NdbDictionary::Column::Timespec:
    break;
  case NdbDictionary::Column::Blob:
-  case NdbDictionary::Column::Clob:
+  case NdbDictionary::Column::Text:
    if (m_precision != col.m_precision ||
        m_scale != col.m_scale ||
        m_length != col.m_length) {
@@ -1088,7 +1088,7 @@ columnTypeMapping[] = {
  { DictTabInfo::ExtDatetime, NdbDictionary::Column::Datetime },
  { DictTabInfo::ExtTimespec, NdbDictionary::Column::Timespec },
  { DictTabInfo::ExtBlob, NdbDictionary::Column::Blob },
-  { DictTabInfo::ExtClob, NdbDictionary::Column::Clob },
+  { DictTabInfo::ExtText, NdbDictionary::Column::Text },
  { -1, -1 }
};
@@ -1253,7 +1253,7 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
{
  for (unsigned i = 0; i < t.m_columns.size(); i++) {
    NdbColumnImpl & c = *t.m_columns[i];
-    if (! c.getBlobType())
+    if (! c.getBlobType() || c.getPartSize() == 0)
      continue;
    NdbTableImpl bt;
    NdbBlob::getBlobTable(bt, &t, &c);
@@ -1622,7 +1622,7 @@ NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t)
{
  for (unsigned i = 0; i < t.m_columns.size(); i++) {
    NdbColumnImpl & c = *t.m_columns[i];
-    if (! c.getBlobType())
+    if (! c.getBlobType() || c.getPartSize() == 0)
      continue;
    char btname[NdbBlob::BlobTableNameSize];
    NdbBlob::getBlobTableName(btname, &t, &c);
...
@@ -441,7 +441,7 @@ inline
bool
NdbColumnImpl::getBlobType() const {
  return (m_type == NdbDictionary::Column::Blob ||
-          m_type == NdbDictionary::Column::Clob);
+          m_type == NdbDictionary::Column::Text);
}
inline
...
@@ -29,6 +29,7 @@ Adjust: 971206 UABRONM First version
#include <ndb_global.h>
#include <NdbOut.hpp>
#include <NdbRecAttr.hpp>
+#include <NdbBlob.hpp>
#include "NdbDictionaryImpl.hpp"
#include <NdbTCP.h>
@@ -147,78 +148,100 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){
  return false;
}
-NdbOut& operator<<(NdbOut& ndbout, const NdbRecAttr &r)
+NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
{
  if (r.isNULL())
  {
-    ndbout << "[NULL]";
-    return ndbout;
+    out << "[NULL]";
+    return out;
  }
  if (r.arraySize() > 1)
-    ndbout << "[";
+    out << "[";
  for (Uint32 j = 0; j < r.arraySize(); j++)
  {
    if (j > 0)
-      ndbout << " ";
+      out << " ";
    switch(r.getType())
    {
    case NdbDictionary::Column::Bigunsigned:
-      ndbout << r.u_64_value();
+      out << r.u_64_value();
      break;
    case NdbDictionary::Column::Unsigned:
-      ndbout << r.u_32_value();
+      out << r.u_32_value();
      break;
    case NdbDictionary::Column::Smallunsigned:
-      ndbout << r.u_short_value();
+      out << r.u_short_value();
      break;
    case NdbDictionary::Column::Tinyunsigned:
-      ndbout << (unsigned) r.u_char_value();
+      out << (unsigned) r.u_char_value();
      break;
    case NdbDictionary::Column::Bigint:
-      ndbout << r.int64_value();
+      out << r.int64_value();
      break;
    case NdbDictionary::Column::Int:
-      ndbout << r.int32_value();
+      out << r.int32_value();
      break;
    case NdbDictionary::Column::Smallint:
-      ndbout << r.short_value();
+      out << r.short_value();
      break;
    case NdbDictionary::Column::Tinyint:
-      ndbout << (int) r.char_value();
+      out << (int) r.char_value();
      break;
    case NdbDictionary::Column::Char:
-      ndbout.print("%.*s", r.arraySize(), r.aRef());
+      out.print("%.*s", r.arraySize(), r.aRef());
      j = r.arraySize();
      break;
    case NdbDictionary::Column::Varchar:
    {
      short len = ntohs(r.u_short_value());
-      ndbout.print("%.*s", len, r.aRef()+2);
+      out.print("%.*s", len, r.aRef()+2);
    }
    j = r.arraySize();
    break;
    case NdbDictionary::Column::Float:
-      ndbout << r.float_value();
+      out << r.float_value();
      break;
    case NdbDictionary::Column::Double:
-      ndbout << r.double_value();
+      out << r.double_value();
      break;
+    case NdbDictionary::Column::Blob:
+    {
+      const NdbBlob::Head* h = (const NdbBlob::Head*)r.aRef();
+      out << h->length << ":";
+      const unsigned char* p = (const unsigned char*)(h + 1);
+      unsigned n = r.arraySize() - sizeof(*h);
+      for (unsigned k = 0; k < n && k < h->length; k++)
+        out.print("%02X", (int)p[k]);
+      j = r.arraySize();
+    }
+    break;
+    case NdbDictionary::Column::Text:
+    {
+      const NdbBlob::Head* h = (const NdbBlob::Head*)r.aRef();
+      out << h->length << ":";
+      const unsigned char* p = (const unsigned char*)(h + 1);
+      unsigned n = r.arraySize() - sizeof(*h);
+      for (unsigned k = 0; k < n && k < h->length; k++)
+        out.print("%c", (int)p[k]);
+      j = r.arraySize();
+    }
+    break;
    default: /* no print functions for the rest, just print type */
-      ndbout << r.getType();
+      out << r.getType();
      j = r.arraySize();
      if (j > 1)
-        ndbout << " %u times" << j;
+        out << " " << j << " times";
      break;
    }
  }
  if (r.arraySize() > 1)
  {
-    ndbout << "]";
+    out << "]";
  }
-  return ndbout;
+  return out;
}
@@ -55,6 +55,13 @@ int NdbResultSet::nextResult(bool fetchAllowed)
          return -1;
        tBlob = tBlob->theNext;
      }
+      /*
+       * Flush blob part ops on behalf of user because
+       * - nextResult is analogous to execute(NoCommit)
+       * - user is likely to want blob value before next execute
+       */
+      if (m_operation->m_transConnection->executePendingBlobOps() == -1)
+        return -1;
      return 0;
    }
    return res;
...
@@ -23,7 +23,6 @@
#include <NdbOut.hpp>
class NDBT_Attribute : public NdbDictionary::Column {
-  friend class NdbOut& operator <<(class NdbOut&, const NDBT_Attribute &);
public:
  NDBT_Attribute(const char* _name,
                 Column::Type _type,
...
This diff is collapsed.
@@ -18,35 +18,6 @@
#include <NdbTimer.hpp>
#include <NDBT.hpp>
-class NdbOut&
-operator <<(class NdbOut& ndbout, const NDBT_Attribute & attr){
-  NdbDictionary::Column::Type type = attr.getType();
-  ndbout << attr.getName() << " " << type;
-  switch(type){
-  case NdbDictionary::Column::Decimal:
-    ndbout << "(" << attr.getScale() << ", " << attr.getPrecision() << ")";
-    break;
-  default:
-    break;
-  }
-  if(attr.getLength() != 1)
-    ndbout << "[" << attr.getLength() << "]";
-  if(attr.getNullable())
-    ndbout << " NULL";
-  else
-    ndbout << " NOT NULL";
-  if(attr.getPrimaryKey())
-    ndbout << " PRIMARY KEY";
-  return ndbout;
-}
class NdbOut&
operator <<(class NdbOut& ndbout, const NDBT_Table & tab)
{
...
@@ -830,7 +830,8 @@ void NDBT_TestSuite::execute(Ndb* ndb, const NdbDictionary::Table* pTab,
    if(pTab2 == 0 && pDict->createTable(* pTab) != 0){
      numTestsFail++;
      numTestsExecuted++;
-      g_err << "ERROR1: Failed to create table " << pTab->getName() << endl;
+      g_err << "ERROR1: Failed to create table " << pTab->getName()
+            << pDict->getNdbError() << endl;
      tests[t]->saveTestResult(pTab, FAILED_TO_CREATE);
      continue;
    }
...
This diff is collapsed.
@@ -35,6 +35,7 @@ class NdbRecAttr; // Forward declaration
class NdbResultSet; // Forward declaration
class NdbScanOperation;
class NdbIndexScanOperation;
+class NdbBlob;
typedef enum ndb_index_type {
  UNDEFINED_INDEX = 0,
@@ -171,6 +172,7 @@ class ha_ndbcluster: public handler
                 enum ha_rkey_function find_flag);
  int close_scan();
  void unpack_record(byte *buf);
+  int get_ndb_lock_type(enum thr_lock_type type);
  void set_dbname(const char *pathname);
  void set_tabname(const char *pathname);
@@ -181,7 +183,9 @@ class ha_ndbcluster: public handler
  int set_ndb_key(NdbOperation*, Field *field,
                  uint fieldnr, const byte* field_ptr);
  int set_ndb_value(NdbOperation*, Field *field, uint fieldnr);
-  int get_ndb_value(NdbOperation*, uint fieldnr, byte *field_ptr);
+  int get_ndb_value(NdbOperation*, Field *field, uint fieldnr);
+  friend int ::get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
+  int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
  int set_primary_key(NdbOperation *op, const byte *key);
  int set_primary_key(NdbOperation *op);
  int set_primary_key_from_old_data(NdbOperation *op, const byte *old_data);
@@ -191,8 +195,8 @@ class ha_ndbcluster: public handler
  void print_results();
  longlong get_auto_increment();
  int ndb_err(NdbConnection*);
+  bool uses_blob_value(bool all_fields);
private:
  int check_ndb_connection();
@@ -209,13 +213,19 @@ class ha_ndbcluster: public handler
  NDB_SHARE *m_share;
  NDB_INDEX_TYPE m_indextype[MAX_KEY];
  const char* m_unique_index_name[MAX_KEY];
-  NdbRecAttr *m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
+  // NdbRecAttr has no reference to blob
+  typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
+  NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
  bool m_use_write;
  bool retrieve_all_fields;
  ha_rows rows_to_insert;
  ha_rows rows_inserted;
  ha_rows bulk_insert_rows;
  ha_rows ops_pending;
+  bool blobs_pending;
+  // memory for blobs in one tuple
+  char *blobs_buffer;
+  uint32 blobs_buffer_size;
};
bool ndbcluster_init(void);
@@ -231,10 +241,3 @@ int ndbcluster_discover(const char* dbname, const char* name,
int ndbcluster_drop_database(const char* path);
void ndbcluster_print_error(int error, const NdbOperation *error_op);
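The ha_ndbcluster.cc side of this change is collapsed above, so what follows is only a rough, hypothetical sketch of the activation-hook pattern the new friend declaration points at: a callback registered with NdbBlob::setActiveHook that walks the operation's blob handles (blobsFirstBlob/blobsNextBlob, declared in NdbBlob.hpp earlier in this diff) and sums their lengths so the caller can size one buffer per row. The function name and buffer policy are illustrative, not the commit's actual code.

// Hypothetical sketch of an NdbBlob::ActiveHook callback (see NdbBlob.hpp above).
static int sum_blob_lengths(NdbBlob* ndb_blob, void* arg)
{
  Uint64* total = (Uint64*)arg;
  *total = 0;
  // Walk every blob handle belonging to the same operation.
  for (NdbBlob* b = ndb_blob->blobsFirstBlob(); b != 0; b = b->blobsNextBlob()) {
    Uint64 len = 0;
    if (b->getLength(len) == -1)   // handle is active here, so the length is known
      return -1;                   // non-zero tells NdbBlob the hook failed
    *total += len;
  }
  return 0;
}

// Registered in the prepare phase, before execute, for example:
//   Uint64 total_bytes = 0;
//   blob_handle->setActiveHook(sum_blob_lengths, &total_bytes);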