Kirill Smelkov / mariadb · Commits

Commit f9dd856d
authored Apr 07, 2005 by msvensson@neptunus.(none)

Merge bk-internal.mysql.com:/home/bk/mysql-5.0
into neptunus.(none):/home/msvensson/mysql/mysql-5.0

Parents: 7a0e28fe, d936a80c

Showing 5 changed files with 87 additions and 16 deletions (+87 -16)
configure.in                    +1   -1
mysql-test/r/ndb_basic.result   +30  -0
mysql-test/t/ndb_basic.test     +25  -0
ndb/src/ndbapi/Ndb.cpp          +22  -14
sql/ha_ndbcluster.cc            +9   -1
configure.in

@@ -6,7 +6,7 @@ AC_PREREQ(2.50)dnl Minimum Autoconf version required.
 AC_INIT(sql/mysqld.cc)
 AC_CANONICAL_SYSTEM
 # Don't forget to also update the NDB lines below.
-AM_INIT_AUTOMAKE(mysql, 5.0.4-beta)
+AM_INIT_AUTOMAKE(mysql, 5.0.5-beta)
 AM_CONFIG_HEADER(config.h)
 PROTOCOL_VERSION=10
...
mysql-test/r/ndb_basic.result

@@ -607,3 +607,33 @@ primary key (a))
 engine=ndb
 max_rows=1;
 drop table t1;
+create table t1
+        (counter int(64) NOT NULL auto_increment,
+        datavalue char(40) default 'XXXX',
+        primary key (counter)
+        ) ENGINE=ndbcluster;
+insert into t1 (datavalue) values ('newval');
+insert into t1 (datavalue) values ('newval');
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+3 newval
+4 newval
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+counter datavalue
+1 newval
+2 newval
+3 newval
+4 newval
+35 newval
+36 newval
+37 newval
+38 newval
+drop table t1;
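The jump from counter 4 to 35 in the last result above is expected with NDB rather than a bug: the second insert...select runs from a second connection (see the test file below), and each connection's Ndb object reserves its own cached block of auto-increment values, so a new session continues from the next unclaimed block instead of max(counter)+1. A minimal NDB API sketch of that behaviour, illustrative only — the helper name and the cache size of 32 are made up, and it assumes two already-connected Ndb objects:

#include <NdbApi.hpp>   // NDB API header shipped in this tree (ndb/include)

// Hypothetical helper, not part of the commit: ndb1/ndb2 stand in for the
// two sessions con1 and con2 used by the test below.
void show_disjoint_ranges(Ndb &ndb1, Ndb &ndb2)
{
  // Same signature as the function changed in ndb/src/ndbapi/Ndb.cpp below:
  // each call reserves up to `cacheSize` tuple ids for that Ndb object only.
  Uint64 a= ndb1.getAutoIncrementValue("t1", 32);
  Uint64 b= ndb2.getAutoIncrementValue("t1", 32);
  // a and b come from different cached blocks, so the auto_increment values
  // visible in SQL can jump (here: 1..4 from con1, then 35..38 from con2).
}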
mysql-test/t/ndb_basic.test

@@ -577,3 +577,28 @@ create table t1
 engine=ndb
 max_rows=1;
 drop table t1;
+
+#
+# Test auto_increment
+#
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+
+create table t1
+        (counter int(64) NOT NULL auto_increment,
+        datavalue char(40) default 'XXXX',
+        primary key (counter)
+        ) ENGINE=ndbcluster;
+
+connection con1;
+insert into t1 (datavalue) values ('newval');
+insert into t1 (datavalue) values ('newval');
+select * from t1 order by counter;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+
+connection con2;
+insert into t1 (datavalue) select datavalue from t1 where counter < 100;
+select * from t1 order by counter;
+
+drop table t1;
ndb/src/ndbapi/Ndb.cpp

@@ -756,26 +756,28 @@ Remark: Returns a new TupleId to the application.
 Uint64
 Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
 {
-  DEBUG_TRACE("getAutoIncrementValue");
+  DBUG_ENTER("getAutoIncrementValue");
   const char * internalTableName = internalizeTableName(aTableName);
   Ndb_local_table_info *info=
     theDictionary->get_local_table_info(internalTableName, false);
   if (info == 0)
-    return ~0;
+    DBUG_RETURN(~0);
   const NdbTableImpl *table= info->m_table_impl;
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64
 Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize)
 {
-  DEBUG_TRACE("getAutoIncrementValue");
+  DBUG_ENTER("getAutoIncrementValue");
   if (aTable == 0)
-    return ~0;
+    DBUG_RETURN(~0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64
...
@@ -790,39 +792,45 @@ Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize)
 Uint64
 Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize)
 {
+  DBUG_ENTER("getTupleIdFromNdb");
   if (theFirstTupleId[aTableId] != theLastTupleId[aTableId])
   {
     theFirstTupleId[aTableId]++;
-    return theFirstTupleId[aTableId];
+    DBUG_PRINT("info", ("next cached value %u", theFirstTupleId[aTableId]));
+    DBUG_RETURN(theFirstTupleId[aTableId]);
   }
   else // theFirstTupleId == theLastTupleId
   {
-    return opTupleIdOnNdb(aTableId, cacheSize, 0);
+    DBUG_PRINT("info",("reading %u values from database",
+                       (cacheSize == 0) ? 1 : cacheSize));
+    DBUG_RETURN(opTupleIdOnNdb(aTableId, (cacheSize == 0) ? 1 : cacheSize, 0));
   }
 }
 
 Uint64
 Ndb::readAutoIncrementValue(const char* aTableName)
 {
-  DEBUG_TRACE("readtAutoIncrementValue");
+  DBUG_ENTER("readtAutoIncrementValue");
   const NdbTableImpl* table = theDictionary->getTable(aTableName);
   if (table == 0) {
     theError= theDictionary->getNdbError();
-    return ~0;
+    DBUG_RETURN(~0);
   }
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64
 Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
 {
-  DEBUG_TRACE("readtAutoIncrementValue");
+  DBUG_ENTER("readtAutoIncrementValue");
   if (aTable == 0)
-    return ~0;
+    DBUG_RETURN(~0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
-  return tupleId;
+  DBUG_PRINT("info", ("value %u", tupleId));
+  DBUG_RETURN(tupleId);
 }
 
 Uint64
...
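The substance of these hunks is mechanical: the old DEBUG_TRACE calls become DBUG_ENTER, and every early `return` becomes DBUG_RETURN so the dbug trace stack stays balanced and the returned value shows up in the trace. A minimal sketch of that pattern, illustrative only and not part of the commit (the function and values are made up; the macros are MySQL's dbug ones from include/my_dbug.h):

#include <my_dbug.h>   // MySQL's dbug tracing macros

// Hypothetical example: once a frame is opened with DBUG_ENTER, every exit
// must go through DBUG_RETURN, otherwise the dbug stack is left unbalanced
// and the traced call tree becomes wrong.
static int lookup_id(int key)
{
  DBUG_ENTER("lookup_id");                  // push a trace frame for this call
  if (key < 0)
    DBUG_RETURN(-1);                        // a plain `return -1;` would skip the pop
  DBUG_PRINT("info", ("key %d accepted", key));
  DBUG_RETURN(key * 2);                     // value is traced, frame popped
}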
sql/ha_ndbcluster.cc

@@ -2989,7 +2989,11 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
   DBUG_PRINT("enter", ("rows: %d", (int)rows));
 
   m_rows_inserted= 0;
-  m_rows_to_insert= rows;
+  if (rows == 0)
+    /* We don't know how many will be inserted, guess */
+    m_rows_to_insert= m_autoincrement_prefetch;
+  else
+    m_rows_to_insert= rows;
 
   /*
     Calculate how many rows that should be inserted
...
@@ -4099,6 +4103,10 @@ ulonglong ha_ndbcluster::get_auto_increment()
   DBUG_ENTER("get_auto_increment");
   DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
   Ndb *ndb= get_ndb();
+
+  if (m_rows_inserted > m_rows_to_insert)
+    /* We guessed too low */
+    m_rows_to_insert+= m_autoincrement_prefetch;
   cache_size=
     (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
     m_rows_to_insert - m_rows_inserted
...
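Taken together, these two hunks change how the handler sizes its auto-increment cache: when the caller does not say how many rows will come (rows == 0) it starts from the prefetch guess, grows that guess if more rows arrive than planned, and asks NDB for no more values than it still expects to need. A standalone sketch of that logic with made-up names standing in for the handler members; the hunk above is cut off before the ternary's else branch, which is assumed here to fall back to the prefetch size:

// Illustrative stand-ins for ha_rows and the handler members, not the real class.
typedef unsigned long long rows_t;

struct AutoIncPlan
{
  rows_t rows_to_insert;   // stands in for m_rows_to_insert
  rows_t rows_inserted;    // stands in for m_rows_inserted
  rows_t prefetch;         // stands in for m_autoincrement_prefetch

  void start_bulk_insert(rows_t rows)
  {
    rows_inserted= 0;
    // rows == 0 means "unknown", so start from the prefetch guess
    rows_to_insert= (rows == 0) ? prefetch : rows;
  }

  rows_t next_cache_size()
  {
    if (rows_inserted > rows_to_insert)
      rows_to_insert+= prefetch;           // we guessed too low, extend the plan
    rows_t remaining= rows_to_insert - rows_inserted;
    // reserve only what is still expected, at most one prefetch block
    // at a time (assumed else branch of the truncated ternary above)
    return remaining < prefetch ? remaining : prefetch;
  }
};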