Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
M
mariadb
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
mariadb
Commits
eea0069e
Commit
eea0069e
authored
Apr 29, 2004
by
magnus@neptunus.(none)
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
WL#1737 Removed superfluous "NoCommit" from insert
parent
376ef1af
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
70 additions
and
7 deletions
+70
-7
sql/ha_ndbcluster.cc
sql/ha_ndbcluster.cc
+65
-7
sql/ha_ndbcluster.h
sql/ha_ndbcluster.h
+5
-0
No files found.
sql/ha_ndbcluster.cc
View file @
eea0069e
...
...
@@ -991,8 +991,16 @@ int ha_ndbcluster::write_row(byte *record)
to NoCommit the transaction between each row.
Find out how this is detected!
*/
rows_inserted
++
;
if
((
rows_inserted
%
bulk_insert_rows
)
==
0
)
{
// Send rows to NDB
DBUG_PRINT
(
"info"
,
(
"Sending inserts to NDB, "
\
"rows_inserted:%d, bulk_insert_rows: %d"
,
rows_inserted
,
bulk_insert_rows
));
if
(
trans
->
execute
(
NoCommit
)
!=
0
)
DBUG_RETURN
(
ndb_err
(
trans
));
}
DBUG_RETURN
(
0
);
}
...
...
@@ -1679,6 +1687,53 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
DBUG_RETURN
(
0
);
}
/*
  Start of an insert, remember number of rows to be inserted, it will
  be used in write_row and get_autoincrement to send an optimal number
  of rows in each roundtrip to the server

  SYNOPSIS
    rows     number of rows to insert, 0 if unknown
*/
void ha_ndbcluster::start_bulk_insert(ha_rows rows)
{
  int bytes, batch;
  const NDBTAB *tab= (const NDBTAB *) m_table;

  DBUG_ENTER("start_bulk_insert");
  /*
    ha_rows is a 64-bit unsigned type; passing it through a %d varargs
    specifier is undefined behavior on LP64 platforms. Cast explicitly
    for the debug trace (the value is only a hint, truncation is fine).
  */
  DBUG_PRINT("enter", ("rows: %d", (int) rows));

  rows_inserted= 0;
  rows_to_insert= rows;

  /*
    Calculate how many rows that should be inserted
    per roundtrip to NDB. This is done in order to minimize the
    number of roundtrips as much as possible. However performance will
    degrade if too many bytes are inserted, thus it's limited by this
    calculation.
  */
  // Per-row cost estimate: 12 bytes of overhead, the row data itself,
  // plus 4 bytes of attribute header per column.
  bytes= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns();
  batch= (1024*256); // 1024 rows, with size 256
  batch= batch/bytes;
  batch= batch == 0 ? 1 : batch; // always send at least one row per batch
  DBUG_PRINT("info", ("batch: %d, bytes: %d", batch, bytes));
  bulk_insert_rows= batch;

  DBUG_VOID_RETURN;
}
/*
  End of an insert.

  Currently a no-op: write_row already flushes accumulated rows to NDB
  in batches of bulk_insert_rows as they arrive, so there is nothing
  left to do when the bulk insert finishes.

  RETURN
    0   always (no error)
*/
int ha_ndbcluster::end_bulk_insert()
{
  DBUG_ENTER("end_bulk_insert");
  DBUG_RETURN(0);
}
int
ha_ndbcluster
::
extra_opt
(
enum
ha_extra_function
operation
,
ulong
cache_size
)
{
...
...
@@ -2322,9 +2377,9 @@ int ndbcluster_drop_database(const char *path)
/*
  Fetch the next autoincrement value for this table from NDB.

  The diff view interleaved the removed call
  (getAutoIncrementValue(m_tabname)) with the added one, which as
  written would redeclare auto_value; this is the post-commit version.

  Pre-fetches a range of values in one roundtrip: when the number of
  rows to be inserted is known (set by start_bulk_insert), cache
  exactly that many, otherwise fall back to a cache of 32 values.

  RETURN
    next autoincrement value, as longlong
*/
longlong ha_ndbcluster::get_auto_increment()
{
  // NOTE(review): rows_to_insert is ha_rows (64-bit); assigning to int
  // truncates for very large bulk inserts — harmless for a cache hint.
  int cache_size= rows_to_insert ? rows_to_insert : 32;
  Uint64 auto_value= m_ndb->getAutoIncrementValue(m_tabname, cache_size);
  return (longlong) auto_value;
}
...
...
@@ -2347,7 +2402,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_BLOBS
|
HA_DROP_BEFORE_CREATE
|
HA_NOT_READ_AFTER_KEY
),
m_use_write
(
false
)
m_use_write
(
false
),
rows_to_insert
(
0
),
rows_inserted
(
0
),
bulk_insert_rows
(
1024
)
{
DBUG_ENTER
(
"ha_ndbcluster"
);
...
...
sql/ha_ndbcluster.h
View file @
eea0069e
...
...
@@ -127,6 +127,8 @@ class ha_ndbcluster: public handler
const
byte
*
end_key
,
uint
end_key_len
,
enum
ha_rkey_function
end_search_flag
);
void
start_bulk_insert
(
ha_rows
rows
);
int
end_bulk_insert
();
static
Ndb
*
seize_ndb
();
static
void
release_ndb
(
Ndb
*
ndb
);
...
...
@@ -195,6 +197,9 @@ class ha_ndbcluster: public handler
NDB_INDEX_TYPE
m_indextype
[
MAX_KEY
];
NdbRecAttr
*
m_value
[
NDB_MAX_ATTRIBUTES_IN_TABLE
];
bool
m_use_write
;
ha_rows
rows_to_insert
;
ha_rows
rows_inserted
;
ha_rows
bulk_insert_rows
;
};
bool
ndbcluster_init
(
void
);
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment